From 86a0eccaca51b86a0a10140b901a06297ed26549 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 20 Nov 2024 17:55:45 +0100 Subject: [PATCH 01/17] First version of GrammarSnippetDirective, copied from an earlier branch Co-authored-by: Blaise Pabon Co-authored-by: William Ferreira --- Doc/tools/extensions/grammar_snippet.py | 134 ++++++++++++++++++++++++ 1 file changed, 134 insertions(+) create mode 100644 Doc/tools/extensions/grammar_snippet.py diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py new file mode 100644 index 00000000000000..1885e0b6dbed28 --- /dev/null +++ b/Doc/tools/extensions/grammar_snippet.py @@ -0,0 +1,134 @@ + +class GrammarSnippetDirective(SphinxDirective): + """Transform a grammar-snippet directive to a Sphinx productionlist + + That is, turn something like: + + .. grammar-snippet:: file + :group: python-grammar + :generated-by: Tools/peg_generator/docs_generator.py + + file: (NEWLINE | statement)* + + into something like: + + .. productionlist:: python-grammar + file: (NEWLINE | statement)* + + The custom directive is needed because Sphinx's `productionlist` does + not support options. + """ + has_content = True + option_spec = { + 'group': directives.unchanged, + 'generated-by': directives.unchanged, + 'diagrams': directives.unchanged, + } + + # Arguments are used by the tool that generates grammar-snippet, + # this Directive ignores them. + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = True + + def run(self): + group_name = self.options['group'] + + rawsource = ''' + # Docutils elements have a `rawsource` attribute that is supposed to be + # set to the original ReST source. + # Sphinx does the following with it: + # - if it's empty, set it to `self.astext()` + # - if it matches `self.astext()` when generating the output, + # apply syntax highlighting (which is based on the plain-text content + # and thus discards internal formatting, like references). + # To get around this, we set it to this fake (and very non-empty) + # string! + ''' + + literal = nodes.literal_block( + rawsource, + '', + # TODO: Use a dedicated CSS class here and for strings, + # and add it to the theme too + classes=['highlight'], + ) + + grammar_re = re.compile( + """ + (?P^[a-zA-Z0-9_]+) # identifier at start of line + (?=:) # ... 
followed by a colon + | + [`](?P[a-zA-Z0-9_]+)[`] # identifier in backquotes + | + (?P'[^']*') # string in 'quotes' + | + (?P"[^"]*") # string in "quotes" + """, + re.VERBOSE, + ) + + for line in self.content: + last_pos = 0 + for match in grammar_re.finditer(line): + # Handle text between matches + if match.start() > last_pos: + literal += nodes.Text(line[last_pos:match.start()]) + last_pos = match.end() + + # Handle matches + groupdict = { + name: content + for name, content in match.groupdict().items() + if content is not None + } + match groupdict: + case {'rule_name': name}: + name_node = addnodes.literal_strong() + + # Cargo-culted magic to make `name_node` a link target + # similar to Sphinx `production`: + domain = self.env.domains['std'] + obj_name = f"{group_name}:{name}" + prefix = f'grammar-token-{group_name}' + node_id = make_id(self.env, self.state.document, prefix, name) + name_node['ids'].append(node_id) + self.state.document.note_implicit_target(name_node, name_node) + domain.note_object('token', obj_name, node_id, location=name_node) + + text_node = nodes.Text(name) + name_node += text_node + literal += name_node + case {'rule_ref': name}: + ref_node = addnodes.pending_xref( + name, + reftype="token", + refdomain="std", + reftarget=f"{group_name}:{name}", + ) + ref_node += nodes.Text(name) + literal += ref_node + case {'single_quoted': name} | {'double_quoted': name}: + string_node = nodes.inline(classes=['nb']) + string_node += nodes.Text(name) + literal += string_node + case _: + raise ValueError('unhandled match') + literal += nodes.Text(line[last_pos:] + '\n') + + + node = nodes.paragraph( + '', '', + literal, + ) + + content = StringList() + for rule_name in self.options['diagrams'].split(): + content.append('', source=__file__) + content.append(f'``{rule_name}``:', source=__file__) + content.append('', source=__file__) + content.append(f'.. image:: diagrams/{rule_name}.svg', source=__file__) + + self.state.nested_parse(content, 0, node) + + return [node] From f6ffb211dffbf5366556f1f5ed8f8b7030d0bab2 Mon Sep 17 00:00:00 2001 From: blaisep Date: Wed, 4 Dec 2024 11:18:56 -0500 Subject: [PATCH 02/17] Add a grammar-snippet directive --- Doc/conf.py | 2 + Doc/reference/toplevel_components.rst | 8 +++- Doc/tools/extensions/grammar_snippet.py | 57 ++++++++++++------------- 3 files changed, 36 insertions(+), 31 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py index 738c9901eef06f..949c751bbab0ab 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -20,6 +20,7 @@ # Python specific content from Doc/Tools/extensions/pyspecific.py from pyspecific import SOURCE_URI + # General configuration # --------------------- @@ -29,6 +30,7 @@ 'availability', 'c_annotations', 'glossary_search', + 'grammar_snippet', 'lexers', 'pyspecific', 'sphinx.ext.coverage', diff --git a/Doc/reference/toplevel_components.rst b/Doc/reference/toplevel_components.rst index dd3d3d6878e289..6019682c40e5d1 100644 --- a/Doc/reference/toplevel_components.rst +++ b/Doc/reference/toplevel_components.rst @@ -66,7 +66,9 @@ File input All input read from non-interactive files has the same form: -.. productionlist:: python-grammar +.. grammar-snippet:: + :group: python-grammar + file_input: (NEWLINE | `statement`)* This syntax is used in the following situations: @@ -85,7 +87,9 @@ Interactive input Input in interactive mode is parsed using the following grammar: -.. productionlist:: python-grammar +... 
grammar-snippet:: + :group: python-grammar + interactive_input: [`stmt_list`] NEWLINE | `compound_stmt` NEWLINE Note that a (top-level) compound statement must be followed by a blank line in diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 1885e0b6dbed28..f41674a18daaf4 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -1,3 +1,11 @@ +import re +from docutils import nodes +from docutils.parsers.rst import directives + +from sphinx import addnodes +from sphinx.util.docutils import SphinxDirective +from sphinx.util.nodes import make_id + class GrammarSnippetDirective(SphinxDirective): """Transform a grammar-snippet directive to a Sphinx productionlist @@ -6,35 +14,31 @@ class GrammarSnippetDirective(SphinxDirective): .. grammar-snippet:: file :group: python-grammar - :generated-by: Tools/peg_generator/docs_generator.py file: (NEWLINE | statement)* - into something like: - - .. productionlist:: python-grammar - file: (NEWLINE | statement)* + into something similar to Sphinx productionlist, but better suited + for our needs: + - Instead of `::=`, use a colon, as in `Grammar/python.gram` + - Show the listing almost as is, with no auto-aligment. + The only special character is the backtick, which marks tokens. - The custom directive is needed because Sphinx's `productionlist` does - not support options. + Unlike Sphinx's productionlist, this directive supports options. + The "group" must be given as an option. """ has_content = True option_spec = { 'group': directives.unchanged, - 'generated-by': directives.unchanged, - 'diagrams': directives.unchanged, } - # Arguments are used by the tool that generates grammar-snippet, - # this Directive ignores them. - required_arguments = 1 - optional_arguments = 0 + # We currently ignore arguments. + required_arguments = 0 + optional_arguments = 1 final_argument_whitespace = True def run(self): group_name = self.options['group'] - rawsource = ''' # Docutils elements have a `rawsource` attribute that is supposed to be # set to the original ReST source. # Sphinx does the following with it: @@ -42,14 +46,13 @@ def run(self): # - if it matches `self.astext()` when generating the output, # apply syntax highlighting (which is based on the plain-text content # and thus discards internal formatting, like references). - # To get around this, we set it to this fake (and very non-empty) - # string! - ''' + # To get around this, we set it to this non-empty string: + rawsource = 'You should not see this.' literal = nodes.literal_block( rawsource, '', - # TODO: Use a dedicated CSS class here and for strings, + # TODO: Use a dedicated CSS class here and for strings. # and add it to the theme too classes=['highlight'], ) @@ -87,7 +90,9 @@ def run(self): name_node = addnodes.literal_strong() # Cargo-culted magic to make `name_node` a link target - # similar to Sphinx `production`: + # similar to Sphinx `production`. + # This needs to be the same as what Sphinx does + # to avoid breaking existing links. 
domain = self.env.domains['std'] obj_name = f"{group_name}:{name}" prefix = f'grammar-token-{group_name}' @@ -116,19 +121,13 @@ def run(self): raise ValueError('unhandled match') literal += nodes.Text(line[last_pos:] + '\n') - node = nodes.paragraph( '', '', literal, ) - content = StringList() - for rule_name in self.options['diagrams'].split(): - content.append('', source=__file__) - content.append(f'``{rule_name}``:', source=__file__) - content.append('', source=__file__) - content.append(f'.. image:: diagrams/{rule_name}.svg', source=__file__) - - self.state.nested_parse(content, 0, node) - return [node] + +def setup(app): + app.add_directive('grammar-snippet', GrammarSnippetDirective) + return {'version': '1.0', 'parallel_read_safe': True} From 5cfc7019b6f98132bfcbd813028f8ee2a6732c4b Mon Sep 17 00:00:00 2001 From: blaisep Date: Wed, 4 Dec 2024 12:02:49 -0500 Subject: [PATCH 03/17] Remove monkey patch for production list and Add CompatProductionList to conserve the productionlist usage in the docs. --- Doc/conf.py | 2 +- Doc/tools/extensions/grammar_snippet.py | 198 +++++++++++++----------- Doc/tools/extensions/pyspecific.py | 6 - 3 files changed, 109 insertions(+), 97 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py index 949c751bbab0ab..d498fedd5275fd 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -30,12 +30,12 @@ 'availability', 'c_annotations', 'glossary_search', - 'grammar_snippet', 'lexers', 'pyspecific', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.extlinks', + 'grammar_snippet', ] # Skip if downstream redistributors haven't installed them diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index f41674a18daaf4..6ac0cee577917c 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -6,6 +6,98 @@ from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import make_id +def make_snippet(directive, options, content): + group_name = options['group'] + + # Docutils elements have a `rawsource` attribute that is supposed to be + # set to the original ReST source. + # Sphinx does the following with it: + # - if it's empty, set it to `self.astext()` + # - if it matches `self.astext()` when generating the output, + # apply syntax highlighting (which is based on the plain-text content + # and thus discards internal formatting, like references). + # To get around this, we set it to this non-empty string: + rawsource = 'You should not see this.' + + literal = nodes.literal_block( + rawsource, + '', + # TODO: Use a dedicated CSS class here and for strings. + # and add it to the theme too + classes=['highlight'], + ) + + grammar_re = re.compile( + """ + (?P^[a-zA-Z0-9_]+) # identifier at start of line + (?=:) # ... followed by a colon + | + [`](?P[a-zA-Z0-9_]+)[`] # identifier in backquotes + | + (?P'[^']*') # string in 'quotes' + | + (?P"[^"]*") # string in "quotes" + """, + re.VERBOSE, + ) + + for line in content: + last_pos = 0 + for match in grammar_re.finditer(line): + # Handle text between matches + if match.start() > last_pos: + literal += nodes.Text(line[last_pos:match.start()]) + last_pos = match.end() + + # Handle matches + groupdict = { + name: content + for name, content in match.groupdict().items() + if content is not None + } + match groupdict: + case {'rule_name': name}: + name_node = addnodes.literal_strong() + + # Cargo-culted magic to make `name_node` a link target + # similar to Sphinx `production`. 
+ # This needs to be the same as what Sphinx does + # to avoid breaking existing links. + domain = directive.env.domains['std'] + obj_name = f"{group_name}:{name}" + prefix = f'grammar-token-{group_name}' + node_id = make_id(directive.env, directive.state.document, prefix, name) + name_node['ids'].append(node_id) + directive.state.document.note_implicit_target(name_node, name_node) + domain.note_object('token', obj_name, node_id, location=name_node) + + text_node = nodes.Text(name) + name_node += text_node + literal += name_node + case {'rule_ref': name}: + ref_node = addnodes.pending_xref( + name, + reftype="token", + refdomain="std", + reftarget=f"{group_name}:{name}", + ) + ref_node += nodes.Text(name) + literal += ref_node + case {'single_quoted': name} | {'double_quoted': name}: + string_node = nodes.inline(classes=['nb']) + string_node += nodes.Text(name) + literal += string_node + case _: + raise ValueError('unhandled match') + literal += nodes.Text(line[last_pos:] + '\n') + + node = nodes.paragraph( + '', '', + literal, + ) + + return [node] + class GrammarSnippetDirective(SphinxDirective): """Transform a grammar-snippet directive to a Sphinx productionlist @@ -37,97 +129,23 @@ class GrammarSnippetDirective(SphinxDirective): final_argument_whitespace = True def run(self): - group_name = self.options['group'] - - # Docutils elements have a `rawsource` attribute that is supposed to be - # set to the original ReST source. - # Sphinx does the following with it: - # - if it's empty, set it to `self.astext()` - # - if it matches `self.astext()` when generating the output, - # apply syntax highlighting (which is based on the plain-text content - # and thus discards internal formatting, like references). - # To get around this, we set it to this non-empty string: - rawsource = 'You should not see this.' - - literal = nodes.literal_block( - rawsource, - '', - # TODO: Use a dedicated CSS class here and for strings. - # and add it to the theme too - classes=['highlight'], - ) - - grammar_re = re.compile( - """ - (?P^[a-zA-Z0-9_]+) # identifier at start of line - (?=:) # ... followed by a colon - | - [`](?P[a-zA-Z0-9_]+)[`] # identifier in backquotes - | - (?P'[^']*') # string in 'quotes' - | - (?P"[^"]*") # string in "quotes" - """, - re.VERBOSE, - ) - - for line in self.content: - last_pos = 0 - for match in grammar_re.finditer(line): - # Handle text between matches - if match.start() > last_pos: - literal += nodes.Text(line[last_pos:match.start()]) - last_pos = match.end() - - # Handle matches - groupdict = { - name: content - for name, content in match.groupdict().items() - if content is not None - } - match groupdict: - case {'rule_name': name}: - name_node = addnodes.literal_strong() - - # Cargo-culted magic to make `name_node` a link target - # similar to Sphinx `production`. - # This needs to be the same as what Sphinx does - # to avoid breaking existing links. 
- domain = self.env.domains['std'] - obj_name = f"{group_name}:{name}" - prefix = f'grammar-token-{group_name}' - node_id = make_id(self.env, self.state.document, prefix, name) - name_node['ids'].append(node_id) - self.state.document.note_implicit_target(name_node, name_node) - domain.note_object('token', obj_name, node_id, location=name_node) - - text_node = nodes.Text(name) - name_node += text_node - literal += name_node - case {'rule_ref': name}: - ref_node = addnodes.pending_xref( - name, - reftype="token", - refdomain="std", - reftarget=f"{group_name}:{name}", - ) - ref_node += nodes.Text(name) - literal += ref_node - case {'single_quoted': name} | {'double_quoted': name}: - string_node = nodes.inline(classes=['nb']) - string_node += nodes.Text(name) - literal += string_node - case _: - raise ValueError('unhandled match') - literal += nodes.Text(line[last_pos:] + '\n') - - node = nodes.paragraph( - '', '', - literal, - ) - - return [node] + return make_snippet(self, self.options, self.content) + + +class CompatProductionList(SphinxDirective): + has_content = True + option_spec = {} + + # We currently ignore arguments. + required_arguments = 1 + + def run(self): + options = {'group': self.arguments[0]} + content = self.content + return make_snippet(self, options, content) + def setup(app): app.add_directive('grammar-snippet', GrammarSnippetDirective) + app.add_directive('productionlist', CompatProductionList, override=True) return {'version': '1.0', 'parallel_read_safe': True} diff --git a/Doc/tools/extensions/pyspecific.py b/Doc/tools/extensions/pyspecific.py index f4df7ec0839339..1dbb4912e80373 100644 --- a/Doc/tools/extensions/pyspecific.py +++ b/Doc/tools/extensions/pyspecific.py @@ -41,12 +41,6 @@ Body.enum.converters['lowerroman'] = \ Body.enum.converters['upperroman'] = lambda x: None -# monkey-patch the productionlist directive to allow hyphens in group names -# https://github.com/sphinx-doc/sphinx/issues/11854 -from sphinx.domains import std - -std.token_re = re.compile(r'`((~?[\w-]*:)?\w+)`') - # backport :no-index: PyModule.option_spec['no-index'] = directives.flag From f0cbed84c6667c7b72fb5358861a948aaa74b17d Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 11 Dec 2024 16:22:38 +0100 Subject: [PATCH 04/17] Fix ReST grammar --- Doc/reference/toplevel_components.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Doc/reference/toplevel_components.rst b/Doc/reference/toplevel_components.rst index 6019682c40e5d1..f155fafbe4d738 100644 --- a/Doc/reference/toplevel_components.rst +++ b/Doc/reference/toplevel_components.rst @@ -87,7 +87,7 @@ Interactive input Input in interactive mode is parsed using the following grammar: -... grammar-snippet:: +.. 
grammar-snippet:: :group: python-grammar interactive_input: [`stmt_list`] NEWLINE | `compound_stmt` NEWLINE From 9139e25bf3da601fb6f2249010051b5ec157b905 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 11 Dec 2024 17:05:53 +0100 Subject: [PATCH 05/17] Complete the CompatProductionList class Co-Authored-By: bswck --- Doc/tools/extensions/grammar_snippet.py | 28 ++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 6ac0cee577917c..81a44ee2eb05fa 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -133,19 +133,33 @@ def run(self): class CompatProductionList(SphinxDirective): - has_content = True - option_spec = {} - - # We currently ignore arguments. + has_content = False required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = True + option_spec = {} def run(self): - options = {'group': self.arguments[0]} - content = self.content + # The "content" of a productionlist is actually the first and only + # argument. The first line is the group; the rest is the content lines. + lines = self.arguments[0].splitlines() + group = lines[0].strip() + options = {'group': group} + # We assume there's a colon in each line; align on it. + align_column = max(line.index(':') for line in lines[1:]) + 1 + content = [] + for line in lines[1:]: + rule_name, colon, text = line.partition(':') + rule_name = rule_name.strip() + if rule_name: + name_part = rule_name + ':' + else: + name_part = '' + content.append(f'{name_part:<{align_column}}{text}') return make_snippet(self, options, content) def setup(app): app.add_directive('grammar-snippet', GrammarSnippetDirective) - app.add_directive('productionlist', CompatProductionList, override=True) + app.add_directive_to_domain('std', 'productionlist', CompatProductionList, override=True) return {'version': '1.0', 'parallel_read_safe': True} From b10d78b094eb815c162b97b4dcc381de2f072757 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 11 Dec 2024 17:13:58 +0100 Subject: [PATCH 06/17] Adjust docstrings & move GrammarSnippetDirective class up --- Doc/tools/extensions/grammar_snippet.py | 80 +++++++++++++++---------- 1 file changed, 48 insertions(+), 32 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 81a44ee2eb05fa..6d385ba2906762 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -6,7 +6,49 @@ from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import make_id + +class GrammarSnippetDirective(SphinxDirective): + """Transform a grammar-snippet directive to a Sphinx literal_block + + That is, turn something like: + + .. grammar-snippet:: file + :group: python-grammar + + file: (NEWLINE | statement)* + + into something similar to Sphinx productionlist, but better suited + for our needs: + - Instead of `::=`, use a colon, as in `Grammar/python.gram` + - Show the listing almost as is, with no auto-aligment. + The only special character is the backtick, which marks tokens. + + Unlike Sphinx's productionlist, this directive supports options. + The "group" must be given as a named option. + The content must be preceded by a blank line (like with most ReST + directives). + """ + has_content = True + option_spec = { + 'group': directives.unchanged, + } + + # We currently ignore arguments. 
+ required_arguments = 0 + optional_arguments = 1 + final_argument_whitespace = True + + def run(self): + return make_snippet(self, self.options, self.content) + + def make_snippet(directive, options, content): + """Create a literal block from options & content. + + This implements the common functionality for GrammarSnippetDirective + and CompatProductionList. + """ + group_name = options['group'] # Docutils elements have a `rawsource` attribute that is supposed to be @@ -99,40 +141,14 @@ def make_snippet(directive, options, content): return [node] -class GrammarSnippetDirective(SphinxDirective): - """Transform a grammar-snippet directive to a Sphinx productionlist - - That is, turn something like: - - .. grammar-snippet:: file - :group: python-grammar - - file: (NEWLINE | statement)* - - into something similar to Sphinx productionlist, but better suited - for our needs: - - Instead of `::=`, use a colon, as in `Grammar/python.gram` - - Show the listing almost as is, with no auto-aligment. - The only special character is the backtick, which marks tokens. +class CompatProductionList(SphinxDirective): + """Create grammar snippets from ReST productionlist syntax - Unlike Sphinx's productionlist, this directive supports options. - The "group" must be given as an option. + This is intended to be a transitional directive, used while we switch + from productionlist to grammar-snippet. + It makes existing docs that use the ReST syntax look like grammar-snippet, + as much as possible. """ - has_content = True - option_spec = { - 'group': directives.unchanged, - } - - # We currently ignore arguments. - required_arguments = 0 - optional_arguments = 1 - final_argument_whitespace = True - - def run(self): - return make_snippet(self, self.options, self.content) - - -class CompatProductionList(SphinxDirective): has_content = False required_arguments = 1 optional_arguments = 0 From a403fb6219df53d1042895ecc0d01e4f3b3a7406 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 11 Dec 2024 18:02:26 +0100 Subject: [PATCH 07/17] Run Ruff --- Doc/tools/extensions/grammar_snippet.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 6d385ba2906762..7091ccf5b98ff4 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -28,6 +28,7 @@ class GrammarSnippetDirective(SphinxDirective): The content must be preceded by a blank line (like with most ReST directives). 
""" + has_content = True option_spec = { 'group': directives.unchanged, @@ -88,7 +89,7 @@ def make_snippet(directive, options, content): for match in grammar_re.finditer(line): # Handle text between matches if match.start() > last_pos: - literal += nodes.Text(line[last_pos:match.start()]) + literal += nodes.Text(line[last_pos : match.start()]) last_pos = match.end() # Handle matches @@ -108,10 +109,16 @@ def make_snippet(directive, options, content): domain = directive.env.domains['std'] obj_name = f"{group_name}:{name}" prefix = f'grammar-token-{group_name}' - node_id = make_id(directive.env, directive.state.document, prefix, name) + node_id = make_id( + directive.env, directive.state.document, prefix, name + ) name_node['ids'].append(node_id) - directive.state.document.note_implicit_target(name_node, name_node) - domain.note_object('token', obj_name, node_id, location=name_node) + directive.state.document.note_implicit_target( + name_node, name_node + ) + domain.note_object( + 'token', obj_name, node_id, location=name_node + ) text_node = nodes.Text(name) name_node += text_node @@ -134,7 +141,8 @@ def make_snippet(directive, options, content): literal += nodes.Text(line[last_pos:] + '\n') node = nodes.paragraph( - '', '', + '', + '', literal, ) @@ -149,6 +157,7 @@ class CompatProductionList(SphinxDirective): It makes existing docs that use the ReST syntax look like grammar-snippet, as much as possible. """ + has_content = False required_arguments = 1 optional_arguments = 0 @@ -177,5 +186,7 @@ def run(self): def setup(app): app.add_directive('grammar-snippet', GrammarSnippetDirective) - app.add_directive_to_domain('std', 'productionlist', CompatProductionList, override=True) + app.add_directive_to_domain( + 'std', 'productionlist', CompatProductionList, override=True + ) return {'version': '1.0', 'parallel_read_safe': True} From 074c1891ebe732f12d8d3214dc0881ef34337879 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 11 Dec 2024 18:07:32 +0100 Subject: [PATCH 08/17] Run Ruff again with different flags --- Doc/conf.py | 1 - Doc/tools/extensions/grammar_snippet.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py index 78e3de921f6b80..8dbf243b09b353 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -20,7 +20,6 @@ # Python specific content from Doc/Tools/extensions/pyspecific.py from pyspecific import SOURCE_URI - # General configuration # --------------------- diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 7091ccf5b98ff4..4efa1fb06f0719 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -1,7 +1,7 @@ import re + from docutils import nodes from docutils.parsers.rst import directives - from sphinx import addnodes from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import make_id @@ -174,7 +174,7 @@ def run(self): align_column = max(line.index(':') for line in lines[1:]) + 1 content = [] for line in lines[1:]: - rule_name, colon, text = line.partition(':') + rule_name, _colon, text = line.partition(':') rule_name = rule_name.strip() if rule_name: name_part = rule_name + ':' From ea9e2f093d0cd2c39611dc852dbe53b261065ba4 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 22 Jan 2025 16:42:16 +0100 Subject: [PATCH 09/17] Turn the common function into a class with method; break make_link_to_token out --- Doc/tools/extensions/grammar_snippet.py | 226 ++++++++++++------------ 1 file changed, 116 insertions(+), 110 
deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 4efa1fb06f0719..a3f94c10741783 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -7,7 +7,119 @@ from sphinx.util.nodes import make_id -class GrammarSnippetDirective(SphinxDirective): + +class GrammarSnippetBase(SphinxDirective): + """Common functionality for GrammarSnippetDirective & CompatProductionList. + """ + # The option/argument handling is left to the individual classes. + + def make_grammar_snippet(self, options, content): + """Create a literal block from options & content.""" + + group_name = options['group'] + + # Docutils elements have a `rawsource` attribute that is supposed to be + # set to the original ReST source. + # Sphinx does the following with it: + # - if it's empty, set it to `self.astext()` + # - if it matches `self.astext()` when generating the output, + # apply syntax highlighting (which is based on the plain-text content + # and thus discards internal formatting, like references). + # To get around this, we set it to this non-empty string: + rawsource = 'You should not see this.' + + literal = nodes.literal_block( + rawsource, + '', + # TODO: Use a dedicated CSS class here and for strings. + # and add it to the theme too + classes=['highlight'], + ) + + grammar_re = re.compile( + """ + (?P^[a-zA-Z0-9_]+) # identifier at start of line + (?=:) # ... followed by a colon + | + [`](?P[a-zA-Z0-9_]+)[`] # identifier in backquotes + | + (?P'[^']*') # string in 'quotes' + | + (?P"[^"]*") # string in "quotes" + """, + re.VERBOSE, + ) + + for line in content: + last_pos = 0 + for match in grammar_re.finditer(line): + # Handle text between matches + if match.start() > last_pos: + literal += nodes.Text(line[last_pos : match.start()]) + last_pos = match.end() + + # Handle matches + groupdict = { + name: content + for name, content in match.groupdict().items() + if content is not None + } + match groupdict: + case {'rule_name': name}: + literal += self.make_link_to_token() + case {'rule_ref': name}: + ref_node = addnodes.pending_xref( + name, + reftype="token", + refdomain="std", + reftarget=f"{group_name}:{name}", + ) + ref_node += nodes.Text(name) + literal += ref_node + case {'single_quoted': name} | {'double_quoted': name}: + string_node = nodes.inline(classes=['nb']) + string_node += nodes.Text(name) + literal += string_node + case _: + raise ValueError('unhandled match') + literal += nodes.Text(line[last_pos:] + '\n') + + node = nodes.paragraph( + '', + '', + literal, + ) + + return [node] + + def make_link_to_token(self, group_name, name): + """Return a literal node that links to the given grammar token""" + name_node = addnodes.literal_strong() + + # Cargo-culted magic to make `name_node` a link target + # similar to Sphinx `production`. + # This needs to be the same as what Sphinx does + # to avoid breaking existing links. 
+ domain = self.env.domains['std'] + obj_name = f"{group_name}:{name}" + prefix = f'grammar-token-{group_name}' + node_id = make_id( + self.env, self.state.document, prefix, name + ) + name_node['ids'].append(node_id) + self.state.document.note_implicit_target( + name_node, name_node + ) + domain.note_object( + 'token', obj_name, node_id, location=name_node + ) + + text_node = nodes.Text(name) + name_node += text_node + return name_node + + +class GrammarSnippetDirective(GrammarSnippetBase): """Transform a grammar-snippet directive to a Sphinx literal_block That is, turn something like: @@ -40,116 +152,10 @@ class GrammarSnippetDirective(SphinxDirective): final_argument_whitespace = True def run(self): - return make_snippet(self, self.options, self.content) - - -def make_snippet(directive, options, content): - """Create a literal block from options & content. - - This implements the common functionality for GrammarSnippetDirective - and CompatProductionList. - """ - - group_name = options['group'] - - # Docutils elements have a `rawsource` attribute that is supposed to be - # set to the original ReST source. - # Sphinx does the following with it: - # - if it's empty, set it to `self.astext()` - # - if it matches `self.astext()` when generating the output, - # apply syntax highlighting (which is based on the plain-text content - # and thus discards internal formatting, like references). - # To get around this, we set it to this non-empty string: - rawsource = 'You should not see this.' - - literal = nodes.literal_block( - rawsource, - '', - # TODO: Use a dedicated CSS class here and for strings. - # and add it to the theme too - classes=['highlight'], - ) - - grammar_re = re.compile( - """ - (?P^[a-zA-Z0-9_]+) # identifier at start of line - (?=:) # ... followed by a colon - | - [`](?P[a-zA-Z0-9_]+)[`] # identifier in backquotes - | - (?P'[^']*') # string in 'quotes' - | - (?P"[^"]*") # string in "quotes" - """, - re.VERBOSE, - ) - - for line in content: - last_pos = 0 - for match in grammar_re.finditer(line): - # Handle text between matches - if match.start() > last_pos: - literal += nodes.Text(line[last_pos : match.start()]) - last_pos = match.end() - - # Handle matches - groupdict = { - name: content - for name, content in match.groupdict().items() - if content is not None - } - match groupdict: - case {'rule_name': name}: - name_node = addnodes.literal_strong() - - # Cargo-culted magic to make `name_node` a link target - # similar to Sphinx `production`. - # This needs to be the same as what Sphinx does - # to avoid breaking existing links. 
- domain = directive.env.domains['std'] - obj_name = f"{group_name}:{name}" - prefix = f'grammar-token-{group_name}' - node_id = make_id( - directive.env, directive.state.document, prefix, name - ) - name_node['ids'].append(node_id) - directive.state.document.note_implicit_target( - name_node, name_node - ) - domain.note_object( - 'token', obj_name, node_id, location=name_node - ) - - text_node = nodes.Text(name) - name_node += text_node - literal += name_node - case {'rule_ref': name}: - ref_node = addnodes.pending_xref( - name, - reftype="token", - refdomain="std", - reftarget=f"{group_name}:{name}", - ) - ref_node += nodes.Text(name) - literal += ref_node - case {'single_quoted': name} | {'double_quoted': name}: - string_node = nodes.inline(classes=['nb']) - string_node += nodes.Text(name) - literal += string_node - case _: - raise ValueError('unhandled match') - literal += nodes.Text(line[last_pos:] + '\n') - - node = nodes.paragraph( - '', - '', - literal, - ) - - return [node] + return self.make_grammar_snippet(self.options, self.content) -class CompatProductionList(SphinxDirective): +class CompatProductionList(GrammarSnippetBase): """Create grammar snippets from ReST productionlist syntax This is intended to be a transitional directive, used while we switch @@ -181,7 +187,7 @@ def run(self): else: name_part = '' content.append(f'{name_part:<{align_column}}{text}') - return make_snippet(self, options, content) + return self.make_grammar_snippet(options, content) def setup(app): From b2205a83f2f2f86350aada49150bed9c9900b59e Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 22 Jan 2025 16:50:42 +0100 Subject: [PATCH 10/17] Remove blank line --- Doc/tools/extensions/grammar_snippet.py | 1 - 1 file changed, 1 deletion(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index a3f94c10741783..3ba3016c35a674 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -7,7 +7,6 @@ from sphinx.util.nodes import make_id - class GrammarSnippetBase(SphinxDirective): """Common functionality for GrammarSnippetDirective & CompatProductionList. """ From 829e4f065e54316a6532bab3725ee89940600f4c Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 22 Jan 2025 16:52:38 +0100 Subject: [PATCH 11/17] Run `ruff format` --- Doc/tools/extensions/grammar_snippet.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 3ba3016c35a674..046c930b0d2846 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -8,8 +8,8 @@ class GrammarSnippetBase(SphinxDirective): - """Common functionality for GrammarSnippetDirective & CompatProductionList. - """ + """Common functionality for GrammarSnippetDirective & CompatProductionList.""" + # The option/argument handling is left to the individual classes. 
def make_grammar_snippet(self, options, content): @@ -102,16 +102,10 @@ def make_link_to_token(self, group_name, name): domain = self.env.domains['std'] obj_name = f"{group_name}:{name}" prefix = f'grammar-token-{group_name}' - node_id = make_id( - self.env, self.state.document, prefix, name - ) + node_id = make_id(self.env, self.state.document, prefix, name) name_node['ids'].append(node_id) - self.state.document.note_implicit_target( - name_node, name_node - ) - domain.note_object( - 'token', obj_name, node_id, location=name_node - ) + self.state.document.note_implicit_target(name_node, name_node) + domain.note_object('token', obj_name, node_id, location=name_node) text_node = nodes.Text(name) name_node += text_node From e57a37e716684e0af3c21646918d0a69acdb2e0a Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 22 Jan 2025 16:57:49 +0100 Subject: [PATCH 12/17] Add forgotten arguments --- Doc/tools/extensions/grammar_snippet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 046c930b0d2846..227778897f8e48 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -65,7 +65,7 @@ def make_grammar_snippet(self, options, content): } match groupdict: case {'rule_name': name}: - literal += self.make_link_to_token() + literal += self.make_link_to_token(group_name, name) case {'rule_ref': name}: ref_node = addnodes.pending_xref( name, From 27321430b6affde9140e71f2f18ce9fa1a831b17 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 29 Jan 2025 16:55:00 +0100 Subject: [PATCH 13/17] Use Sphinx's token_xrefs function for formatting the tokens --- Doc/tools/extensions/grammar_snippet.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 227778897f8e48..0d641b3480750e 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -5,6 +5,7 @@ from sphinx import addnodes from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import make_id +from sphinx.domains.std import token_xrefs class GrammarSnippetBase(SphinxDirective): @@ -36,11 +37,11 @@ def make_grammar_snippet(self, options, content): ) grammar_re = re.compile( - """ + r""" (?P^[a-zA-Z0-9_]+) # identifier at start of line (?=:) # ... 
followed by a colon | - [`](?P[a-zA-Z0-9_]+)[`] # identifier in backquotes + (?P`[^\s`]+`) # identifier in backquotes | (?P'[^']*') # string in 'quotes' | @@ -65,16 +66,9 @@ def make_grammar_snippet(self, options, content): } match groupdict: case {'rule_name': name}: - literal += self.make_link_to_token(group_name, name) - case {'rule_ref': name}: - ref_node = addnodes.pending_xref( - name, - reftype="token", - refdomain="std", - reftarget=f"{group_name}:{name}", - ) - ref_node += nodes.Text(name) - literal += ref_node + literal += self.make_link_target_for_token(group_name, name) + case {'rule_ref': ref_text}: + literal += token_xrefs(ref_text, group_name) case {'single_quoted': name} | {'double_quoted': name}: string_node = nodes.inline(classes=['nb']) string_node += nodes.Text(name) @@ -91,8 +85,8 @@ def make_grammar_snippet(self, options, content): return [node] - def make_link_to_token(self, group_name, name): - """Return a literal node that links to the given grammar token""" + def make_link_target_for_token(self, group_name, name): + """Return a literal node which is a link target for the given token""" name_node = addnodes.literal_strong() # Cargo-culted magic to make `name_node` a link target From ef3c552a3636dbdc20e4753f55b6c8cae89336c3 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 29 Jan 2025 17:41:10 +0100 Subject: [PATCH 14/17] Improve the highlight class injection --- Doc/tools/extensions/grammar_snippet.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 0d641b3480750e..0ebb5fa17d05f1 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -8,6 +8,14 @@ from sphinx.domains.std import token_xrefs +class snippet_literal_string(nodes.inline): + """Node for a string literal in a grammar snippet.""" + + def __init__(self) -> None: + # Use the Pygments highlight class for `Literal.String.Other` + super().__init__(classes=['sx']) + + class GrammarSnippetBase(SphinxDirective): """Common functionality for GrammarSnippetDirective & CompatProductionList.""" @@ -31,8 +39,6 @@ def make_grammar_snippet(self, options, content): literal = nodes.literal_block( rawsource, '', - # TODO: Use a dedicated CSS class here and for strings. 
- # and add it to the theme too classes=['highlight'], ) @@ -70,7 +76,7 @@ def make_grammar_snippet(self, options, content): case {'rule_ref': ref_text}: literal += token_xrefs(ref_text, group_name) case {'single_quoted': name} | {'double_quoted': name}: - string_node = nodes.inline(classes=['nb']) + string_node = snippet_literal_string() string_node += nodes.Text(name) literal += string_node case _: From c0c8432e4cbfcbb195605c34ec37d43e37b0b607 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 29 Jan 2025 17:48:59 +0100 Subject: [PATCH 15/17] Appease Ruff --- Doc/tools/extensions/grammar_snippet.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 0ebb5fa17d05f1..10be809bc7c97e 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -72,7 +72,9 @@ def make_grammar_snippet(self, options, content): } match groupdict: case {'rule_name': name}: - literal += self.make_link_target_for_token(group_name, name) + literal += self.make_link_target_for_token( + group_name, name + ) case {'rule_ref': ref_text}: literal += token_xrefs(ref_text, group_name) case {'single_quoted': name} | {'double_quoted': name}: From e2359b729c9ef90c49f71872789c09e5647f9aa7 Mon Sep 17 00:00:00 2001 From: Petr Viktorin Date: Wed, 29 Jan 2025 17:53:17 +0100 Subject: [PATCH 16/17] Appase Ruff again --- Doc/tools/extensions/grammar_snippet.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Doc/tools/extensions/grammar_snippet.py b/Doc/tools/extensions/grammar_snippet.py index 10be809bc7c97e..dc0f5a7c293ad9 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -3,12 +3,12 @@ from docutils import nodes from docutils.parsers.rst import directives from sphinx import addnodes +from sphinx.domains.std import token_xrefs from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import make_id -from sphinx.domains.std import token_xrefs -class snippet_literal_string(nodes.inline): +class SnippetStringNode(nodes.inline): """Node for a string literal in a grammar snippet.""" def __init__(self) -> None: @@ -78,7 +78,7 @@ def make_grammar_snippet(self, options, content): case {'rule_ref': ref_text}: literal += token_xrefs(ref_text, group_name) case {'single_quoted': name} | {'double_quoted': name}: - string_node = snippet_literal_string() + string_node = SnippetStringNode() string_node += nodes.Text(name) literal += string_node case _: From 3fcb7303ee7c5923613f91fc3655fd3d3c5b696b Mon Sep 17 00:00:00 2001 From: Adam Turner <9087854+aa-turner@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:12:33 +0000 Subject: [PATCH 17/17] Add type annotations, minor clean-ups - Add a module docstring - Use the snake_case convention for node classes - Make :group: a required option - declare parallel_write_safe = True --- Doc/conf.py | 2 +- Doc/tools/extensions/grammar_snippet.py | 60 ++++++++++++++++++------- 2 files changed, 44 insertions(+), 18 deletions(-) diff --git a/Doc/conf.py b/Doc/conf.py index 87dcf725d74f50..0b7bd6516af57f 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -27,13 +27,13 @@ 'c_annotations', 'changes', 'glossary_search', + 'grammar_snippet', 'lexers', 'pydoc_topics', 'pyspecific', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.extlinks', - 'grammar_snippet', ] # Skip if downstream redistributors haven't installed them diff --git a/Doc/tools/extensions/grammar_snippet.py 
b/Doc/tools/extensions/grammar_snippet.py index dc0f5a7c293ad9..03c7e7ce2f4228 100644 --- a/Doc/tools/extensions/grammar_snippet.py +++ b/Doc/tools/extensions/grammar_snippet.py @@ -1,4 +1,9 @@ +"""Support for documenting Python's grammar.""" + +from __future__ import annotations + import re +from typing import TYPE_CHECKING from docutils import nodes from docutils.parsers.rst import directives @@ -7,13 +12,28 @@ from sphinx.util.docutils import SphinxDirective from sphinx.util.nodes import make_id +if TYPE_CHECKING: + from collections.abc import Sequence + from typing import Any + + from docutils.nodes import Node + from sphinx.application import Sphinx + from sphinx.util.typing import ExtensionMetadata -class SnippetStringNode(nodes.inline): + +class snippet_string_node(nodes.inline): # noqa: N801 (snake_case is fine) """Node for a string literal in a grammar snippet.""" - def __init__(self) -> None: + def __init__( + self, + rawsource: str = '', + text: str = '', + *children: Node, + **attributes: Any, + ) -> None: + super().__init__(rawsource, text, *children, **attributes) # Use the Pygments highlight class for `Literal.String.Other` - super().__init__(classes=['sx']) + self['classes'].append('sx') class GrammarSnippetBase(SphinxDirective): @@ -21,7 +41,9 @@ class GrammarSnippetBase(SphinxDirective): # The option/argument handling is left to the individual classes. - def make_grammar_snippet(self, options, content): + def make_grammar_snippet( + self, options: dict[str, Any], content: Sequence[str] + ) -> list[nodes.paragraph]: """Create a literal block from options & content.""" group_name = options['group'] @@ -65,12 +87,12 @@ def make_grammar_snippet(self, options, content): last_pos = match.end() # Handle matches - groupdict = { + group_dict = { name: content for name, content in match.groupdict().items() if content is not None } - match groupdict: + match group_dict: case {'rule_name': name}: literal += self.make_link_target_for_token( group_name, name @@ -78,9 +100,7 @@ def make_grammar_snippet(self, options, content): case {'rule_ref': ref_text}: literal += token_xrefs(ref_text, group_name) case {'single_quoted': name} | {'double_quoted': name}: - string_node = SnippetStringNode() - string_node += nodes.Text(name) - literal += string_node + literal += snippet_string_node('', name) case _: raise ValueError('unhandled match') literal += nodes.Text(line[last_pos:] + '\n') @@ -93,8 +113,10 @@ def make_grammar_snippet(self, options, content): return [node] - def make_link_target_for_token(self, group_name, name): - """Return a literal node which is a link target for the given token""" + def make_link_target_for_token( + self, group_name: str, name: str + ) -> addnodes.literal_strong: + """Return a literal node which is a link target for the given token.""" name_node = addnodes.literal_strong() # Cargo-culted magic to make `name_node` a link target @@ -138,7 +160,7 @@ class GrammarSnippetDirective(GrammarSnippetBase): has_content = True option_spec = { - 'group': directives.unchanged, + 'group': directives.unchanged_required, } # We currently ignore arguments. 
@@ -146,12 +168,12 @@ class GrammarSnippetDirective(GrammarSnippetBase): optional_arguments = 1 final_argument_whitespace = True - def run(self): + def run(self) -> list[nodes.paragraph]: return self.make_grammar_snippet(self.options, self.content) class CompatProductionList(GrammarSnippetBase): - """Create grammar snippets from ReST productionlist syntax + """Create grammar snippets from reST productionlist syntax This is intended to be a transitional directive, used while we switch from productionlist to grammar-snippet. @@ -165,7 +187,7 @@ class CompatProductionList(GrammarSnippetBase): final_argument_whitespace = True option_spec = {} - def run(self): + def run(self) -> list[nodes.paragraph]: # The "content" of a productionlist is actually the first and only # argument. The first line is the group; the rest is the content lines. lines = self.arguments[0].splitlines() @@ -185,9 +207,13 @@ def run(self): return self.make_grammar_snippet(options, content) -def setup(app): +def setup(app: Sphinx) -> ExtensionMetadata: app.add_directive('grammar-snippet', GrammarSnippetDirective) app.add_directive_to_domain( 'std', 'productionlist', CompatProductionList, override=True ) - return {'version': '1.0', 'parallel_read_safe': True} + return { + 'version': '1.0', + 'parallel_read_safe': True, + 'parallel_write_safe': True, + }
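For reference, a minimal illustrative sketch (not part of the patch series) of how the final form of the tokenizing regex (patch 13 onwards) splits one grammar line into rule definitions, rule references and plain text. The named groups (`rule_name`, `rule_ref`, `single_quoted`, `double_quoted`) follow the `groupdict()` keys the directive matches on; the sample line is taken from `Doc/reference/toplevel_components.rst`:

    import re

    # Final form of the pattern from grammar_snippet.py; group names follow
    # the groupdict() keys used in the directive's match statement.
    grammar_re = re.compile(
        r"""
        (?P<rule_name>^[a-zA-Z0-9_]+)  # identifier at start of line
        (?=:)                          # ... followed by a colon
        |
        (?P<rule_ref>`[^\s`]+`)        # identifier in backquotes
        |
        (?P<single_quoted>'[^']*')     # string in 'quotes'
        |
        (?P<double_quoted>"[^"]*")     # string in "quotes"
        """,
        re.VERBOSE,
    )

    line = "file_input: (NEWLINE | `statement`)*"
    for match in grammar_re.finditer(line):
        print(match.lastgroup, match.group())
    # rule_name file_input
    # rule_ref `statement`
    # The text between matches (": (NEWLINE | " and ")*") is emitted by the
    # directive as plain Text nodes.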
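The "cargo-culted magic" in make_link_target_for_token() exists so that the anchors and cross-reference names match what Sphinx's own production directive and token role generate, which is why existing links keep working. A rough sketch of the identifiers involved, assuming group `python-grammar` and rule `file_input` (the exact result of `make_id()` may differ if an id is already taken):

    group_name = 'python-grammar'
    name = 'file_input'

    # Registered in the std domain under 'token', so token cross-references
    # (including those produced by token_xrefs) can resolve to it:
    obj_name = f'{group_name}:{name}'       # 'python-grammar:file_input'

    # HTML anchor produced via make_id(env, document, prefix, name);
    # typically prefix + '-' + name, the same form Sphinx's `production`
    # directive has always generated:
    prefix = f'grammar-token-{group_name}'  # 'grammar-token-python-grammar'
    node_id = f'{prefix}-{name}'            # 'grammar-token-python-grammar-file_input'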
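And a small worked example (again only illustrative) of what CompatProductionList.run() does with the body of an existing productionlist directive before handing it to make_grammar_snippet(): the whole body arrives as the single directive argument, the first line names the group, and the remaining lines are re-padded so every right-hand side starts in the same column. The sample grammar lines are reused from the snippets above:

    argument = (
        "python-grammar\n"
        "file_input: (NEWLINE | `statement`)*\n"
        "interactive_input: [`stmt_list`] NEWLINE | `compound_stmt` NEWLINE"
    )

    lines = argument.splitlines()
    group = lines[0].strip()    # becomes options={'group': 'python-grammar'}
    # Column just past the widest rule name, so all definitions line up:
    align_column = max(line.index(':') for line in lines[1:]) + 1

    content = []
    for line in lines[1:]:
        rule_name, _colon, text = line.partition(':')
        rule_name = rule_name.strip()
        name_part = rule_name + ':' if rule_name else ''  # '' for continuation lines
        content.append(f'{name_part:<{align_column}}{text}')

    # content is then passed to make_grammar_snippet(); here it holds
    # (right-hand sides aligned one column past the longest rule name):
    #   file_input:        (NEWLINE | `statement`)*
    #   interactive_input: [`stmt_list`] NEWLINE | `compound_stmt` NEWLINE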