Python docutils.nodes.row() Examples
The following are 30 code examples of docutils.nodes.row().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module docutils.nodes.
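Before the examples, here is a minimal sketch of how docutils.nodes.row() is typically used inside a table tree. This is a hand-rolled illustration for orientation only, not taken from any of the projects below:

from docutils import nodes

# Build a one-row, two-column table by hand.
table = nodes.table()
tgroup = nodes.tgroup(cols=2)
table += tgroup
for _ in range(2):
    tgroup += nodes.colspec(colwidth=1)

tbody = nodes.tbody()
tgroup += tbody

row = nodes.row()
for text in ("cell 1", "cell 2"):
    entry = nodes.entry()
    entry += nodes.paragraph(text=text)
    row += entry
tbody += row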
Example #1
Source File: tables.py From cadquery-freecad-module with GNU Lesser General Public License v3.0 | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #2
Source File: swaggerv2_doc.py From sphinx-swaggerdoc with MIT License | 6 votes |
def make_parameters(self, parameters):
    entries = []
    head = ['Name', 'Position', 'Description', 'Type']
    body = []
    for param in parameters:
        row = []
        row.append(param.get('name', ''))
        row.append(param.get('in', ''))
        row.append(param.get('description', ''))
        row.append(param.get('type', ''))
        body.append(row)
    table = self.create_table(head, body)
    paragraph = nodes.paragraph()
    paragraph += nodes.strong('', 'Parameters')
    entries.append(paragraph)
    entries.append(table)
    return entries
Example #3
Source File: tables.py From aws-extender with MIT License | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #4
Source File: speciescatalog.py From stdpopsim with GNU General Public License v3.0 | 6 votes |
def models_table(self, species):
    table = nodes.table()
    tgroup = nodes.tgroup(cols=2)
    for _ in range(2):
        colspec = nodes.colspec(colwidth=1)
        tgroup.append(colspec)
    table += tgroup

    thead = nodes.thead()
    tgroup += thead
    row = nodes.row()
    entry = nodes.entry()
    entry += nodes.paragraph(text="ID")
    row += entry
    entry = nodes.entry()
    entry += nodes.paragraph(text="Description")
    row += entry
    thead.append(row)

    rows = []
    for model in species.demographic_models:
        row = nodes.row()
        rows.append(row)
        mid = self.get_demographic_model_id(species, model)
        entry = nodes.entry()
        para = nodes.paragraph()
        entry += para
        para += nodes.reference(internal=True, refid=mid, text=model.id)
        row += entry
        entry = nodes.entry()
        entry += nodes.paragraph(text=model.description)
        row += entry
    tbody = nodes.tbody()
    tbody.extend(rows)
    tgroup += tbody
    return table
Example #5
Source File: markdown_writer.py From sphinx-markdown-builder with MIT License | 6 votes |
def depart_autosummary_table(self, node):
    """Sphinx autosummary
    See http://www.sphinx-doc.org/en/master/usage/extensions/autosummary.html."""
    pass

################################################################################
# tables
#
# docutils.nodes.table
#     docutils.nodes.tgroup [cols=x]
#         docutils.nodes.colspec
#
#         docutils.nodes.thead
#             docutils.nodes.row
#                 docutils.nodes.entry
#                 docutils.nodes.entry
#                 docutils.nodes.entry
#
#         docutils.nodes.tbody
#             docutils.nodes.row
#                 docutils.nodes.entry
Example #6
Source File: swaggerv2_doc.py From sphinx-swaggerdoc with MIT License | 6 votes |
def create_table(self, head, body, colspec=None):
    table = nodes.table()
    tgroup = nodes.tgroup()
    table.append(tgroup)

    # Create a colspec for each column
    if colspec is None:
        colspec = [1 for n in range(len(head))]
    for width in colspec:
        tgroup.append(nodes.colspec(colwidth=width))

    # Create the table headers
    thead = nodes.thead()
    thead.append(self.row(head))
    tgroup.append(thead)

    # Create the table body
    tbody = nodes.tbody()
    tbody.extend([self.row(r) for r in body])
    tgroup.append(tbody)

    return table
Example #7
Source File: states.py From aws-extender with MIT License | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #8
Source File: tables.py From faces with GNU General Public License v2.0 | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #9
Source File: states.py From blackmamba with MIT License | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #10
Source File: states.py From faces with GNU General Public License v2.0 | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #11
Source File: tables.py From blackmamba with MIT License | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #12
Source File: tables.py From faces with GNU General Public License v2.0 | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #13
Source File: states.py From faces with GNU General Public License v2.0 | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #14
Source File: states.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 6 votes |
def build_table(self, tabledata, tableline, stub_columns=0, widths=None):
    colwidths, headrows, bodyrows = tabledata
    table = nodes.table()
    if widths == 'auto':
        table['classes'] += ['colwidths-auto']
    elif widths:  # "grid" or list of integers
        table['classes'] += ['colwidths-given']
    tgroup = nodes.tgroup(cols=len(colwidths))
    table += tgroup
    for colwidth in colwidths:
        colspec = nodes.colspec(colwidth=colwidth)
        if stub_columns:
            colspec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += colspec
    if headrows:
        thead = nodes.thead()
        tgroup += thead
        for row in headrows:
            thead += self.build_table_row(row, tableline)
    tbody = nodes.tbody()
    tgroup += tbody
    for row in bodyrows:
        tbody += self.build_table_row(row, tableline)
    return table
Example #15
Source File: states.py From faces with GNU General Public License v2.0 | 6 votes |
def build_table(self, tabledata, tableline, stub_columns=0, widths=None):
    colwidths, headrows, bodyrows = tabledata
    table = nodes.table()
    if widths:
        table['classes'] += ['colwidths-%s' % widths]
    tgroup = nodes.tgroup(cols=len(colwidths))
    table += tgroup
    for colwidth in colwidths:
        colspec = nodes.colspec(colwidth=colwidth)
        if stub_columns:
            colspec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += colspec
    if headrows:
        thead = nodes.thead()
        tgroup += thead
        for row in headrows:
            thead += self.build_table_row(row, tableline)
    tbody = nodes.tbody()
    tgroup += tbody
    for row in bodyrows:
        tbody += self.build_table_row(row, tableline)
    return table
Example #16
Source File: states.py From cadquery-freecad-module with GNU Lesser General Public License v3.0 | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #17
Source File: states.py From bash-lambda-layer with MIT License | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #18
Source File: autosummary__init__.py From pyGSTi with Apache License 2.0 | 6 votes |
def autosummary_table_visit_html(self, node):
    """Make the first column of the table non-breaking."""
    try:
        tbody = node[0][0][-1]
        for row in tbody:
            col1_entry = row[0]
            par = col1_entry[0]
            for j, subnode in enumerate(list(par)):
                if isinstance(subnode, nodes.Text):
                    new_text = text_type(subnode.astext())
                    new_text = new_text.replace(u" ", u"\u00a0")
                    par[j] = nodes.Text(new_text)
    except IndexError:
        pass

# -- autodoc integration -------------------------------------------------------
Example #19
Source File: tables.py From bash-lambda-layer with MIT License | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #20
Source File: tables.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #21
Source File: states.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #22
Source File: schematable.py From altair with BSD 3-Clause "New" or "Revised" License | 6 votes |
def prepare_table_header(titles, widths):
    """Build docutils empty table."""
    ncols = len(titles)
    assert len(widths) == ncols

    tgroup = nodes.tgroup(cols=ncols)
    for width in widths:
        tgroup += nodes.colspec(colwidth=width)
    header = nodes.row()
    for title in titles:
        header += nodes.entry("", nodes.paragraph(text=title))
    tgroup += nodes.thead("", header)

    tbody = nodes.tbody()
    tgroup += tbody

    return nodes.table("", tgroup), tbody
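A short usage sketch for the helper above. The call site and the row data are hypothetical and not part of the altair source; they only show how the returned tbody is meant to be filled:

table, tbody = prepare_table_header(["Name", "Description"], [30, 70])
for name, desc in [("colspec", "column properties"), ("entry", "table cell")]:
    row = nodes.row()
    for text in (name, desc):
        row += nodes.entry("", nodes.paragraph(text=text))
    tbody += row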
Example #23
Source File: states.py From deepWordBug with Apache License 2.0 | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #24
Source File: tables.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #25
Source File: tables.py From deepWordBug with Apache License 2.0 | 6 votes |
def parse_csv_data_into_rows(self, csv_data, dialect, source):
    # csv.py doesn't do Unicode; encode temporarily as UTF-8
    csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                             for line in csv_data],
                            dialect=dialect)
    rows = []
    max_cols = 0
    for row in csv_reader:
        row_data = []
        for cell in row:
            # decode UTF-8 back to Unicode
            cell_text = self.decode_from_csv(cell)
            cell_data = (0, 0, 0, statemachine.StringList(
                cell_text.splitlines(), source=source))
            row_data.append(cell_data)
        rows.append(row_data)
        max_cols = max(max_cols, len(row))
    return rows, max_cols
Example #26
Source File: states.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 6 votes |
def build_table_row(self, rowdata, tableline):
    row = nodes.row()
    for cell in rowdata:
        if cell is None:
            continue
        morerows, morecols, offset, cellblock = cell
        attributes = {}
        if morerows:
            attributes['morerows'] = morerows
        if morecols:
            attributes['morecols'] = morecols
        entry = nodes.entry(**attributes)
        row += entry
        if ''.join(cellblock):
            self.nested_parse(cellblock, input_offset=tableline+offset,
                              node=entry)
    return row
Example #27
Source File: tables.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 5 votes |
def check_list_content(self, node):
    if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
        error = self.state_machine.reporter.error(
            'Error parsing content block for the "%s" directive: '
            'exactly one bullet list expected.' % self.name,
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        raise SystemMessagePropagation(error)
    list_node = node[0]
    # Check for a uniform two-level bullet list:
    for item_index in range(len(list_node)):
        item = list_node[item_index]
        if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
            error = self.state_machine.reporter.error(
                'Error parsing content block for the "%s" directive: '
                'two-level bullet list expected, but row %s does not '
                'contain a second-level bullet list.'
                % (self.name, item_index + 1),
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(error)
        elif item_index:
            # ATTN pychecker users: num_cols is guaranteed to be set in the
            # "else" clause below for item_index==0, before this branch is
            # triggered.
            if len(item[0]) != num_cols:
                error = self.state_machine.reporter.error(
                    'Error parsing content block for the "%s" directive: '
                    'uniform two-level bullet list expected, but row %s '
                    'does not contain the same number of items as row 1 '
                    '(%s vs %s).'
                    % (self.name, item_index + 1, len(item[0]), num_cols),
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                raise SystemMessagePropagation(error)
        else:
            num_cols = len(item[0])
    col_widths = self.get_column_widths(num_cols)
    return num_cols, col_widths
Example #28
Source File: tables.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 5 votes |
def build_table_from_list(self, table_data, col_widths, header_rows,
                          stub_columns):
    table = nodes.table()
    if self.widths == 'auto':
        table['classes'] += ['colwidths-auto']
    elif self.widths:  # "grid" or list of integers
        table['classes'] += ['colwidths-given']
    tgroup = nodes.tgroup(cols=len(col_widths))
    table += tgroup
    for col_width in col_widths:
        colspec = nodes.colspec()
        if col_width is not None:
            colspec.attributes['colwidth'] = col_width
        if stub_columns:
            colspec.attributes['stub'] = 1
            stub_columns -= 1
        tgroup += colspec
    rows = []
    for row in table_data:
        row_node = nodes.row()
        for cell in row:
            entry = nodes.entry()
            entry += cell
            row_node += entry
        rows.append(row_node)
    if header_rows:
        thead = nodes.thead()
        thead.extend(rows[:header_rows])
        tgroup += thead
    tbody = nodes.tbody()
    tbody.extend(rows[header_rows:])
    tgroup += tbody
    return table
Example #29
Source File: tables.py From AWS-Transit-Gateway-Demo-MultiAccount with MIT License | 5 votes |
def extend_short_rows_with_empty_cells(self, columns, parts):
    for part in parts:
        for row in part:
            if len(row) < columns:
                row.extend([(0, 0, 0, [])] * (columns - len(row)))
Example #30
Source File: progress.py From PGPy with BSD 3-Clause "New" or "Revised" License | 5 votes |
def create_headrow(self, label="Progress", classes=('prog-top-label',)):
    hrow = nodes.row()
    hrow += nodes.entry('', nodes.paragraph(text=label),
                        classes=['head'] + list(classes))
    hrow += nodes.entry('', nodes.paragraph(text='PLACEHOLDER'),
                        classes=['PLACEHOLDER'])
    return hrow