diff --git a/newdocs/src/changelog.md b/newdocs/src/changelog.md index c5205e88..2ce7b483 100644 --- a/newdocs/src/changelog.md +++ b/newdocs/src/changelog.md @@ -34,6 +34,8 @@ - fixed SNAFU1 for MD031 fix mode, assert - [Issue 1344](https://github.com/jackdewinter/pymarkdown/issues/1344) - fixed SNAFU6 for MD031 fix mode, assert +- [Issue 1346](https://github.com/jackdewinter/pymarkdown/issues/1346) + - fixed SNAFU8 for MD031 fix mode, assert ### Changed diff --git a/publish/coverage.json b/publish/coverage.json index 21cc0e2c..f650e6bc 100644 --- a/publish/coverage.json +++ b/publish/coverage.json @@ -2,12 +2,12 @@ "projectName": "pymarkdown", "reportSource": "pytest", "branchLevel": { - "totalMeasured": 8017, - "totalCovered": 8017 + "totalMeasured": 8025, + "totalCovered": 8025 }, "lineLevel": { - "totalMeasured": 21664, - "totalCovered": 21664 + "totalMeasured": 21713, + "totalCovered": 21713 } } diff --git a/publish/test-results.json b/publish/test-results.json index c24128ef..4b509516 100644 --- a/publish/test-results.json +++ b/publish/test-results.json @@ -1367,7 +1367,7 @@ "totalTests": 655, "failedTests": 0, "errorTests": 0, - "skippedTests": 25, + "skippedTests": 24, "elapsedTimeInMilliseconds": 0 }, { diff --git a/pymarkdown/plugins/rule_md_031.py b/pymarkdown/plugins/rule_md_031.py index 1f940480..bb2733a4 100644 --- a/pymarkdown/plugins/rule_md_031.py +++ b/pymarkdown/plugins/rule_md_031.py @@ -134,9 +134,7 @@ def starting_new_file(self) -> None: self.__x4.clear() self.__x5.clear() - def __fix_spacing_special_case( - self, context: PluginScanContext, token: MarkdownToken - ) -> None: + def __xabc(self, context: PluginScanContext, token: MarkdownToken) -> None: assert ( self.__last_token is not None ), "Special case means at least a block token." @@ -182,6 +180,11 @@ def __fix_spacing_special_case( context, self.__last_token, token, replacement_tokens ) + def __fix_spacing_special_case( + self, context: PluginScanContext, token: MarkdownToken + ) -> None: + + self.__xabc(context, token) end_token = cast(EndMarkdownToken, self.__last_token) block_quote_start_token = cast( BlockQuoteMarkdownToken, end_token.start_markdown_token @@ -196,6 +199,96 @@ def __fix_spacing_special_case( ) ) + def __fix_spacing_special_case2( + self, context: PluginScanContext, token: MarkdownToken + ) -> None: + """ + This is another special case where we have to do things long-form. + """ + assert ( + self.__last_token is not None + ), "Special case means at least a block token." + assert ( + self.__last_token is not None + ), "Special case means at least a block token." + new_token = copy.deepcopy(token) + self.__fix_count += 1 + new_token.adjust_line_number(context, self.__fix_count) + + assert self.__second_last_token is not None + replacement_tokens = [ + BlankLineMarkdownToken( + extracted_whitespace="", + position_marker=PositionMarker(new_token.line_number - 1, 0, ""), + ), + self.__second_last_token, + self.__last_token, + new_token, + ] + self.register_replace_tokens_request( + context, self.__second_last_token, token, replacement_tokens + ) + + end_token = cast(EndMarkdownToken, self.__second_last_token) + stack_token = self.__leading_space_index_tracker.get_container_stack_item(-1) + assert stack_token == end_token.start_markdown_token + list_start_token = cast(ListStartMarkdownToken, stack_token) + assert ( + list_start_token.leading_spaces is not None + ), "At least one line should have been processed." 
+ split_leading_spaces = list_start_token.leading_spaces.split("\n") + self.__container_adjustments[-1].append( + PendingContainerAdjustment( + len(split_leading_spaces) - 1, + split_leading_spaces[-1].rstrip(), + do_insert=False, + ) + ) + + end_token = cast(EndMarkdownToken, self.__last_token) + stack_token = self.__leading_space_index_tracker.get_container_stack_item(-2) + assert stack_token == end_token.start_markdown_token + block_quote_start_token = cast( + BlockQuoteMarkdownToken, end_token.start_markdown_token + ) + assert ( + block_quote_start_token.bleading_spaces is not None + ), "At least one line should have been processed." + split_bleading_spaces = block_quote_start_token.bleading_spaces.split("\n") + self.__container_adjustments[-2].append( + PendingContainerAdjustment( + len(split_bleading_spaces) - 1, + split_bleading_spaces[-1].rstrip(), + do_insert=False, + ) + ) + + stack_token = self.__leading_space_index_tracker.get_container_stack_item(-3) + list_start_token = cast(ListStartMarkdownToken, stack_token) + assert ( + list_start_token.leading_spaces is not None + ), "At least one line should have been processed." + split_leading_spaces = list_start_token.leading_spaces.split("\n") + self.__container_adjustments[-3].append( + PendingContainerAdjustment( + len(split_bleading_spaces) - 1, split_leading_spaces[-1] + ) + ) + + stack_token = self.__leading_space_index_tracker.get_container_stack_item(-4) + block_quote_start_token = cast(BlockQuoteMarkdownToken, stack_token) + assert ( + block_quote_start_token.bleading_spaces is not None + ), "At least one line should have been processed." + split_bleading_spaces = block_quote_start_token.bleading_spaces.split("\n") + self.__container_adjustments[-4].append( + PendingContainerAdjustment( + len(split_bleading_spaces) - 1, + split_bleading_spaces[-1], + do_special=True, + ) + ) + def __abc(self, context: PluginScanContext) -> Tuple[bool, int]: apply_rstrip = True delayed_container_x_adjustment = 0 @@ -459,8 +552,10 @@ def __fix_spacing_list_detect_special( # index -= last_closed_container_info.adjustment index -= adjust # SNAFU6? 
- # xx =self.__leading_space_index_tracker.get_tokens_list_leading_space_index(token) - selected_leading_space = split_spaces[index] + xx = self.__leading_space_index_tracker.get_tokens_list_leading_space_index( + token + ) + selected_leading_space = split_spaces[xx] if selected_leading_space.endswith(ParserLogger.blah_sequence): stack_index = initial_index - 1 while ( @@ -471,7 +566,7 @@ def __fix_spacing_list_detect_special( ): stack_index -= 1 assert stack_index >= 0 - return True, stack_index, index + return True, stack_index, xx return False, -1, -1 def __fix_spacing_list( @@ -881,11 +976,18 @@ def __apply_tailing_block_quote_fix( ) def __fix_spacing( - self, context: PluginScanContext, token: MarkdownToken, special_case: bool + self, + context: PluginScanContext, + token: MarkdownToken, + special_case: bool, + special_case_2: bool, ) -> None: if special_case: self.__fix_spacing_special_case(context, token) return + if special_case_2: + self.__fix_spacing_special_case2(context, token) + return at_least_one_container = ( self.__leading_space_index_tracker.in_at_least_one_container() ) @@ -930,7 +1032,11 @@ def __fix_spacing( self.__fix_spacing_with_else(context, token, new_token) def __handle_fenced_code_block( - self, context: PluginScanContext, token: MarkdownToken, special_case: bool + self, + context: PluginScanContext, + token: MarkdownToken, + special_case: bool, + special_case_2: bool, ) -> None: can_trigger = ( @@ -947,7 +1053,7 @@ def __handle_fenced_code_block( and can_trigger ): if context.in_fix_mode and not context.is_during_line_pass: - self.__fix_spacing(context, token, special_case) + self.__fix_spacing(context, token, special_case, special_case_2) else: self.report_next_token_error(context, token) @@ -1002,7 +1108,7 @@ def __handle_end_fenced_code_block( and can_trigger ): if context.in_fix_mode: - self.__fix_spacing(context, token, False) + self.__fix_spacing(context, token, False, False) else: assert self.__last_non_end_token line_number_delta, column_number_delta = self.__calculate_end_deltas() @@ -1119,13 +1225,9 @@ def __process_pending_container_end_tokens( del self.__container_x[-1] is_first = False - def __calculate_special_case( - self, context: PluginScanContext, token: MarkdownToken - ) -> bool: + def __calculate_special_case(self) -> bool: return bool( - context.in_fix_mode - and token.is_fenced_code_block - and self.__leading_space_index_tracker.get_container_stack_size() >= 2 + self.__leading_space_index_tracker.get_container_stack_size() >= 2 and self.__leading_space_index_tracker.get_container_stack_item( -1 ).is_block_quote_start @@ -1138,14 +1240,43 @@ def __calculate_special_case( and self.__second_last_token.is_paragraph_end ) + def __calculate_specials( + self, context: PluginScanContext, token: MarkdownToken + ) -> Tuple[bool, bool]: + + special_case = False + special_case_2 = False + if context.in_fix_mode and token.is_fenced_code_block: + special_case = self.__calculate_special_case() + if not special_case: + special_case_2 = ( + len(self.__x5) >= 3 + and self.__x5[-1].is_block_quote_end + and self.__x5[-2].is_list_end + and self.__x5[-3].is_paragraph_end + ) + if special_case_2: + special_case_2 = ( + self.__leading_space_index_tracker.get_container_stack_size() + >= 4 + and self.__leading_space_index_tracker.get_container_stack_item( + -3 + ).is_list_start + and self.__leading_space_index_tracker.get_container_stack_item( + -4 + ).is_block_quote_start + ) + + return special_case, special_case_2 + def next_token(self, context: 
PluginScanContext, token: MarkdownToken) -> None: """ Event that a new token is being processed. """ - special_case = self.__calculate_special_case(context, token) + special_case, special_case_2 = self.__calculate_specials(context, token) if not token.is_end_token or token.is_end_of_stream: - if not special_case: + if not (special_case or special_case_2): self.__process_pending_container_end_tokens(context, token) if self.__end_fenced_code_block_token: self.__handle_end_fenced_code_block(context, token) @@ -1157,8 +1288,10 @@ def next_token(self, context: PluginScanContext, token: MarkdownToken) -> None: elif token.is_block_quote_end or token.is_list_end: self.__leading_space_index_tracker.register_container_end(token) elif token.is_fenced_code_block: - self.__handle_fenced_code_block(context, token, special_case) - if special_case: + self.__handle_fenced_code_block( + context, token, special_case, special_case_2 + ) + if special_case or special_case_2: self.__process_pending_container_end_tokens(context, token) elif token.is_fenced_code_block_end: self.__end_fenced_code_block_token = cast(EndMarkdownToken, token) diff --git a/test/rules/test_md031.py b/test/rules/test_md031.py index 775b844c..0f509dda 100644 --- a/test/rules/test_md031.py +++ b/test/rules/test_md031.py @@ -6578,10 +6578,10 @@ scan_expected_output="""{temp_source_path}:5:5: MD031: Fenced code blocks should be surrounded by blank lines (blanks-around-fences) {temp_source_path}:7:5: MD031: Fenced code blocks should be surrounded by blank lines (blanks-around-fences) """, - disable_rules="md032", + disable_rules="md032,md027", use_debug=True, - use_fix_debug=True, - mark_fix_as_skipped=skip_fix_bad_markdown, + # use_fix_debug=True, + # mark_fix_as_skipped=skip_fix_bad_markdown, fix_expected_file_contents="""> + > ----- > > + list 1 > > list 2 @@ -6803,6 +6803,7 @@ {temp_source_path}:8:1: MD031: Fenced code blocks should be surrounded by blank lines (blanks-around-fences) """, disable_rules="md032", + use_debug=True, fix_expected_file_contents="""> + > ----- > > + list 1 > > list 2 @@ -6948,6 +6949,7 @@ disable_rules="md032", use_debug=True, mark_fix_as_skipped=skip_fix_bad_markdown, + use_fix_debug=True, fix_expected_file_contents="""> + + ----- > > block 1 > > block 2 diff --git a/test/test_markdown_extra.py b/test/test_markdown_extra.py index 2dd94dab..e6307bb3 100644 --- a/test/test_markdown_extra.py +++ b/test/test_markdown_extra.py @@ -16226,11 +16226,6 @@ def test_extra_999(): # ^^^^^^^^^ # AssertionError -# SNAFU8 -# bad_fenced_block_in_block_quote_in_list_in_block_quote_with_previous_list_double_drop_x -# File "C:\enlistments\pymarkdown\pymarkdown\plugins\rule_md_031.py", line 463, in __fix_spacing_list_detect_special -# selected_leading_space = split_spaces[index] - # SNAFU9 # bad_fenced_block_in_list_in_list_in_block_quote_with_previous_block_double_drop # File "c:\enlistments\pymarkdown\pymarkdown\transform_markdown\transform_list_block.py", line 773, in __rehydrate_list_start_deep
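
Reviewer sketch, not part of the patch: the new __calculate_specials gate is easiest to read as a standalone predicate. The code below uses simplified stand-in objects -- FakeToken and looks_like_special_case_2 are hypothetical names -- and only illustrates the shape of the check in the patch: the last three entries of the __x5 buffer must be a paragraph end, then a list end, then a block quote end (most recent last), and the container stack must be at least four deep with a list start at -3 under a block quote start at -4.

# Reviewer sketch only, not part of the patch. FakeToken is a hypothetical
# stand-in for MarkdownToken; only the boolean flags used by the new check
# are modeled.
from dataclasses import dataclass
from typing import List


@dataclass
class FakeToken:
    is_block_quote_end: bool = False
    is_list_end: bool = False
    is_paragraph_end: bool = False
    is_list_start: bool = False
    is_block_quote_start: bool = False


def looks_like_special_case_2(
    pending_end_tokens: List[FakeToken], container_stack: List[FakeToken]
) -> bool:
    # Mirror of the patch's check: a paragraph end, a list end, and a block
    # quote end must be the three most recently buffered end tokens (block
    # quote end last), and the container stack needs a list start at -3
    # inside a block quote start at -4.
    if len(pending_end_tokens) < 3 or len(container_stack) < 4:
        return False
    return (
        pending_end_tokens[-1].is_block_quote_end
        and pending_end_tokens[-2].is_list_end
        and pending_end_tokens[-3].is_paragraph_end
        and container_stack[-3].is_list_start
        and container_stack[-4].is_block_quote_start
    )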
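
Reviewer sketch, not part of the patch: __fix_spacing_special_case2 records one PendingContainerAdjustment per enclosing container, and each adjustment targets the final entry of that container's newline-joined leading_spaces (lists) or bleading_spaces (block quotes) value. The helper below is hypothetical -- last_entry_adjustment does not exist in the codebase -- and only shows how the (index, text) pair is derived; in the patch that pair is wrapped in PendingContainerAdjustment with the rstripped text and do_insert=False for the containers at -1 and -2, the verbatim text for the list at -3, and the verbatim text plus do_special=True for the outermost block quote at -4.

# Reviewer sketch only, not part of the patch; last_entry_adjustment is a
# hypothetical helper name.
from typing import Tuple


def last_entry_adjustment(
    joined_spaces: str, strip_trailing: bool
) -> Tuple[int, str]:
    """Derive the (index, text) pair for a container's last leading-space entry.

    joined_spaces is the newline-joined leading_spaces/bleading_spaces string
    kept on a list or block-quote start token; the special case always edits
    the final entry.
    """
    split_spaces = joined_spaces.split("\n")
    text = split_spaces[-1].rstrip() if strip_trailing else split_spaces[-1]
    return len(split_spaces) - 1, text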
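
Reviewer sketch, not part of the patch: on the token-stream side, the new special case follows the same pattern as the existing one -- a blank line token is synthesized, the two buffered container end tokens are re-emitted, and a renumbered deep copy of the fenced code block start closes the replacement span handed to register_replace_tokens_request. In the sketch below, plain strings stand in for the real token objects and build_replacement_stream is a hypothetical name; the real code constructs a BlankLineMarkdownToken positioned one line above the adjusted fence start and reuses self.__second_last_token and self.__last_token unchanged.

# Reviewer sketch only, not part of the patch; strings stand in for tokens.
from typing import List


def build_replacement_stream(
    second_last_end_token: str, last_end_token: str, renumbered_fence_start: str
) -> List[str]:
    # Order matters: the synthesized blank line comes first, then the two
    # buffered end tokens, then the line-number-adjusted fenced code block
    # start that originally triggered MD031.
    return [
        "[BLANK(extracted_whitespace='')]",
        second_last_end_token,
        last_end_token,
        renumbered_fence_start,
    ]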