test.leo: semi-official tests
@settings
@data unl-path-prefixes
@data history-list
@button copy-test
Latex buttons
@@button Create LaTeX
@@clean c:\test\latex_test.tex
level 1
level 2
EKR Projects
--- demo.py stuff
@@@button Demo @key=Ctrl-9
<< imports >>
<< class MyDemo >>
setup
setup_script
teardown
teardown_script
demo-script
First
Full featured outliner
Leo's panes
Leo creates scripts from outlines
Clones and cff
Last
Demo Area
@@button IntroSlides @key=Ctrl-9
<< imports >>
<< class IntroSlides >>
setup
setup_script
teardown
teardown_script
<< main >>
Leo's Main Window
intro-slides-script
Slide 1: Leo's main window
--- working graphics stuff
run turtledemo/penrose.py
mathjax rendering (VR3 only)
markdown example from .ipynb file
mathjax-test2.html
VR3: test TeX markup (works)
working VR examples
at-html tests
@html sphinx Link
@html display Leo tree (cool)
@html Leo's home page
at-image tests
@image LeoSplash (works, but very large)
@path C:\Repos\leo-editor\leo\Icons
@image LeoSplash (test: using @path)
at-pyplot tests
@pyplot barchart_demo
@pyplot basic_example 1
@pyplot basic_example 2
@pyplot matplotlib animate_decay
data_gen
init
run
@pyplot two graphs in same window
at-svg tests
@svg bubbles sources
@svg bubbles.svg
docstring test
markdown tests
markdown test (including url)
markdown test (math)
markdown test (link)
rst tests
rst test (including url)
rst example
python
python code block in rst
URLs tests
@url Leo's home page (md)
@url Leo's home page (rST)
@url mail (rst)
@url weather (md)
--- broken graphics stuff
barchart_demo (Matplotlib plugin)
md pictures
Render LaTeX in VR (not ready for Python 3)
--- Group theory
Quaternions
@@button show-table
class Table
init
print_*
solve
Alternating group table
Matrix Group
define group
row1, row2
--- jinja
@jinja
@jinja template
@jinja inputs
@jinja variable name
update_jinja
jinja2
Tests
Tests of UNL (do not delete!)
@file ../plugins/importers/__init__.py
@clean ../plugins/leo_babel/__init__.py
Error messages (copy to log)
Legacy UNLs: in this file
Legacy UNLs: other files
URLs and UNLs:
New UNLs with empty file parts
New UNLs with short file parts
New UNLs with relative file parts
New UNLs with long (absolute) file parts
Syntax coloring tests
rust colorizing test
nim syntax coloring test
csharp colorizer test
wikiview test
--- coloring tests
test: python
test: md
test: jupytext: both
test: jupytext: md
test: jupytext: python
Test of @others
test: language rest/python
test: at-language md/python
test: @killcolor
test: @nocolor & @color
test: python & md
cweb
elisp (lisp)
--- jupytext stuff
--- jupytext sources (study only)
Python3.12/Lib/site-packages/jupytext
__init__.py (see __all__)
__main__.py
cell_metadata.py
class RLogicalValueError
class RMarkdownOptionParsingError
function: _py_logical_values
function: metadata_to_rmd_options
function: update_metadata_from_rmd_options
class ParsingContext
ParsingContext.__init__
ParsingContext.in_global_expression
ParsingContext.count_special_chars
function: parse_rmd_options
function: rmd_options_to_metadata
function: try_eval_metadata
function: is_active
function: metadata_to_double_percent_options
function: incorrectly_encoded_metadata
function: is_identifier
function: is_valid_metadata_key
function: is_jupyter_language
function: parse_key_equal_value
function: relax_json_loads
function: is_json_metadata
function: text_to_metadata
function: metadata_to_text
cell_reader.py
function: uncomment
function: paragraph_is_fully_commented
function: next_code_is_indented
function: count_lines_to_next_cell
function: last_two_lines_blank
class BaseCellReader
BaseCellReader.__init__
BaseCellReader.read
BaseCellReader.metadata_and_language_from_option_line
BaseCellReader.options_to_metadata
BaseCellReader.find_cell_end
BaseCellReader.find_cell_content
BaseCellReader.uncomment_code_and_magics
BaseCellReader.extract_content
class MarkdownCellReader
MarkdownCellReader.__init__
MarkdownCellReader.metadata_and_language_from_option_line
MarkdownCellReader.options_to_metadata
MarkdownCellReader.find_cell_end
MarkdownCellReader.uncomment_code_and_magics
class RMarkdownCellReader
RMarkdownCellReader.options_to_metadata
RMarkdownCellReader.uncomment_code_and_magics
class ScriptCellReader
ScriptCellReader.uncomment_code_and_magics
class RScriptCellReader
RScriptCellReader.options_to_metadata
RScriptCellReader.find_cell_end
class LightScriptCellReader
LightScriptCellReader.__init__
LightScriptCellReader.metadata_and_language_from_option_line
LightScriptCellReader.options_to_metadata
LightScriptCellReader.find_cell_end
LightScriptCellReader.find_region_end
class DoublePercentScriptCellReader
DoublePercentScriptCellReader.__init__
DoublePercentScriptCellReader.metadata_and_language_from_option_line
DoublePercentScriptCellReader.find_cell_content
DoublePercentScriptCellReader.find_cell_end
class HydrogenCellReader
class SphinxGalleryScriptCellReader
SphinxGalleryScriptCellReader.__init__
SphinxGalleryScriptCellReader.start_of_new_markdown_cell
SphinxGalleryScriptCellReader.metadata_and_language_from_option_line
SphinxGalleryScriptCellReader.find_cell_end
SphinxGalleryScriptCellReader.find_cell_content
cell_to_text.py
function: cell_source
function: three_backticks_or_more
class BaseCellExporter
BaseCellExporter.__init__
BaseCellExporter.is_code
BaseCellExporter.use_triple_quotes
BaseCellExporter.cell_to_text
BaseCellExporter.markdown_to_text
BaseCellExporter.code_to_text
BaseCellExporter.remove_eoc_marker
class MarkdownCellExporter
MarkdownCellExporter.__init__
MarkdownCellExporter.html_comment
MarkdownCellExporter.cell_to_text
MarkdownCellExporter.code_to_text
class RMarkdownCellExporter
RMarkdownCellExporter.__init__
RMarkdownCellExporter.code_to_text
function: endofcell_marker
class LightScriptCellExporter
LightScriptCellExporter.__init__
LightScriptCellExporter.is_code
LightScriptCellExporter.code_to_text
LightScriptCellExporter.explicit_start_marker
LightScriptCellExporter.remove_eoc_marker
class BareScriptCellExporter
class RScriptCellExporter
RScriptCellExporter.__init__
RScriptCellExporter.code_to_text
class DoublePercentCellExporter
DoublePercentCellExporter.__init__
DoublePercentCellExporter.cell_to_text
class HydrogenCellExporter
class SphinxGalleryCellExporter
SphinxGalleryCellExporter.__init__
SphinxGalleryCellExporter.cell_to_text
cli.py
function: system
function: tool_version
function: str2bool
function: parse_jupytext_args
function: jupytext
function: jupytext_single_file
function: notebooks_in_git_index
function: is_untracked
function: print_paired_paths
function: set_format_options
function: set_prefix_and_suffix
class NotAPairedNotebook
class InconsistentVersions
function: file_in_git_index
function: git_timestamp
function: get_timestamp
function: load_paired_notebook
function: exec_command
function: pipe_notebook
function: execution_counts_are_in_order
function: code_cells_have_changed
combine.py
function: black_invariant
function: same_content
function: combine_inputs_with_outputs
function: map_outputs_to_inputs
compare.py
function: _multilines
function: compare
function: filtered_cell
function: filtered_notebook_metadata
class NotebookDifference
function: same_content
function: compare_notebooks
function: compare_cells
function: test_round_trip_conversion
function: create_mirror_file_if_missing
function: assert_conversion_same_as_mirror
function: notebook_model
config.py
class JupytextConfigurationError
class JupytextConfiguration
JupytextConfiguration.set_default_format_options
JupytextConfiguration.default_formats
JupytextConfiguration.__eq__
function: preferred_format
function: global_jupytext_configuration_directories
function: find_global_jupytext_configuration_file
function: find_jupytext_configuration_file
function: parse_jupytext_configuration_file
function: load_jupytext_configuration_file
function: load_jupytext_config
function: validate_jupytext_configuration_file
function: notebook_formats
contentsmanager.py
function: build_jupytext_contents_manager_class
class JupytextContentsManager
JupytextContentsManager.__init__
JupytextContentsManager.all_nb_extensions
JupytextContentsManager.drop_paired_notebook
JupytextContentsManager.update_paired_notebooks
JupytextContentsManager.create_prefix_dir
JupytextContentsManager.save
JupytextContentsManager.get
function: new_untitled
function: increment_notebook_filename
function: trust_notebook
function: rename_file
function: get_parent_dir
function: get_config_file
function: load_config_file
function: get_config
doxygen.py
function: markdown_to_doxygen
function: doxygen_to_markdown
formats.py
class JupytextFormatError
class NotebookFormatDescription
NotebookFormatDescription.__init__
function: get_format_implementation
function: read_metadata
function: read_format_from_metadata
function: guess_format
function: divine_format
function: check_file_version
function: format_name_for_ext
function: identical_format_path
function: update_jupytext_formats_metadata
function: rearrange_jupytext_metadata
function: long_form_one_format
function: long_form_multiple_formats
function: short_form_one_format
function: short_form_multiple_formats
function: validate_one_format
function: auto_ext_from_metadata
function: check_auto_ext
function: formats_with_support_for_cell_metadata
header.py
function: insert_or_test_version_number
function: uncomment_line
function: encoding_and_executable
function: insert_jupytext_info_and_filter_metadata
function: metadata_and_cell_to_header
function: recursive_update
function: header_to_metadata_and_cell
jupytext.py (main entry points)
class NotSupportedNBFormatVersion(NotImplementedError)
class TextNotebookConverter(NotebookReader, NotebookWriter)
TextNotebookConverter.__init__
TextNotebookConverter.update_fmt_with_notebook_options
TextNotebookConverter.reads
TextNotebookConverter.filter_notebook
TextNotebookConverter.writes
function: reads (entry point: jupytext.py)
function: read (entry point: jupytext.py)
function: writes (entry point: jupytext.py)
function: drop_text_representation_metadata (jupytext.py)
function: write (entry_point: jupytext.py)
function: create_prefix_dir
function: _warn_on_unsupported_keys
kernels.py
function: set_kernelspec_from_language
function: kernelspec_from_language
languages.py
function: default_language_from_metadata_and_ext
function: usual_language_name
function: same_language
function: set_main_and_cell_language
function: cell_language
function: comment_lines
magics.py
function: get_comment
function: is_magic
function: need_explicit_marker
function: comment_magic
function: unesc
function: uncomment_magic
function: is_escaped_code_start
function: escape_code_start
function: unescape_code_start
metadata_filter.py
function: metadata_filter_as_dict
function: metadata_filter_as_string
function: update_metadata_filters
function: filter_metadata
function: second_level
function: suppress_unsupported_keys
function: subset_metadata
function: restore_filtered_metadata
myst.py
function: is_myst_available
function: raise_if_myst_is_not_available
function: myst_version
function: myst_extensions
function: get_parser
function: matches_mystnb
class CompactDumper
function: represent_list
function: represent_dict
function: dump_yaml_blocks
function: from_nbnode
class MystMetadataParsingError
function: strip_blank_lines
function: read_fenced_cell
function: parse_directive_options
function: read_cell_metadata
function: myst_to_notebook
function: notebook_to_myst
paired_paths.py
class InconsistentPath
function: split
function: join
function: separator
function: base_path
function: full_path
function: find_base_path_and_format
function: paired_paths
pairs.py
class PairedFilesDiffer
function: write_pair
function: latest_inputs_and_outputs
function: read_pair
pandoc.py
class PandocError
function: pandoc
function: is_pandoc_available
function: raise_if_pandoc_is_not_available
function: pandoc_version
function: md_to_notebook
function: notebook_to_md
pep8.py
function: next_instruction_is_function_or_class
function: cell_ends_with_function_or_class
function: cell_ends_with_code
function: cell_has_code
function: pep8_lines_between_cells
quarto.py
class QuartoError
function: quarto
function: is_quarto_available
function: raise_if_quarto_is_not_available
function: quarto_version
function: qmd_to_notebook
function: notebook_to_qmd
reraise.py
function: reraise
stringparser.py
class StringParser
StringParser.__init__
StringParser.is_quoted
StringParser.read_line
version.py
Python3.12/Lib/site-packages/jupytext_config
__init__.py (empty)
__main__.py
jupytext_config.py
class SubCommand
SubCommand.__init__
SubCommand.main
class ListDefaultViewer
ListDefaultViewer.__init__
ListDefaultViewer.main
ListDefaultViewer.fill_parser
class SetDefaultViewer
SetDefaultViewer.__init__
SetDefaultViewer.main
SetDefaultViewer.fill_parser
class UnsetDefaultViewer
UnsetDefaultViewer.__init__
UnsetDefaultViewer.main
UnsetDefaultViewer.fill_parser
function: main
labconfig.py
class LabConfig
LabConfig.__init__
LabConfig.read
LabConfig.get_viewers
LabConfig.list_default_viewer
LabConfig.set_default_viewers
LabConfig.set_default_viewer
LabConfig.unset_default_viewers
LabConfig.unset_default_viewer
LabConfig.write
script: test-jupytext
<< Use cli: --to fmt ekr-small-test.ipynb >>
--- jupytext: files
##@jupytext c:/test/minimal_test.ipynb
@path C:/Test/
##@jupytext Notebooks-intro.ipynb
##@jupytext c:/test/ekr-small-test.ipynb
<< preamble >>
python
node 1
node 2
markdown
##@jupytext c:/test/example-data-analysis.ipynb
<< prefix >>
# Loading and Analyzing Data
programmers like shortening names for
## Load the air quality data
Download the data from the EPA website
air_2019_df =
"{} rows for 2019, {} rows for
## Aggregate and average MA data by city
Step 1 - filter the data down by state
now trim down to just the columns we
now group all the records by city and
## Visually Inspect the Data
alt.Chart(ma_city_avg_df,
read .ipynb into a node
##@clean C:/Users/Dev/EKR-Study/test
##@jupytext C:/Users/Dev/EKR-Study/test.ipynb
<< prefix >>
imports
EKR: new
Images
Figures 1 & 2
--- most recent coloring tests
GLSL
js
html w/ js
html
vue.js (using @language)
vue.js
html error
css
ai script
function: summarize_and_reformat
function: process_node_with_openai
function: add_openai_command
@language python
"""
Change the LaTeX section levels in the subtree of an @clean file node to
match each node's outline level. As a result, only one LaTeX section level
can be used within a single node body.
"""
import re

section_levels = {
    1: 'chapter',
    2: 'section',
    3: 'subsection',
    4: 'subsubsection',
    5: 'paragraph',
    6: 'subparagraph',
}

def latex_convert_section_levels(p, adjusted_level_name):
    """Replace LaTeX section definitions found on a single line (re multiline mode).
    Return the modified node body."""
    return re.sub(
        r'\\(chapter|section|subsection|subsubsection|paragraph|subparagraph)(\[.*?\])?({.*})',
        r'\\' + adjusted_level_name + r'\g<2>\g<3>',
        p.b,
        flags=re.M)  # Note: re.M must be passed as *flags*; the 4th positional arg of re.sub is *count*.

u, undoType = c.undoer, 'change-latex'
h = p.h.strip()
if g.match_word(h, 0, '@clean') and h.endswith('.tex'):
    bunch = u.beforeChangeTree(c.p)
    changed, dirtyVnodeList = 0, []
    root_level = p.level()
    for p in p.subtree():
        level = p.level() - root_level
        if level < 7:
            level_name = section_levels[level]
        else:
            level_name = 'subparagraph'
        s = latex_convert_section_levels(p, level_name)
        if s != p.b:
            bunch2 = u.beforeChangeNodeContents(p)
            p.b = s
            u.afterChangeNodeContents(p, undoType, bunch2, dirtyVnodeList=dirtyVnodeList)
            p.v.setDirty()
            changed += 1
    if changed:
        u.afterChangeTree(c.p, undoType, bunch)
        g.es('Changed %s node%s.' % (changed, g.plural(changed)))
    else:
        g.es('No nodes changed')
else:
    g.es('not a LaTeX file node?')
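# A minimal sketch of the transformation (hypothetical lines, assuming the
# node sits at outline level 3 below the @clean node):
#   before:  \section[Intro]{Introduction}
#   after:   \subsection[Intro]{Introduction}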
@language tex
@others
\paragraph{one}
\paragraph{two}
# Note: Modify @data unl-path-prefixes (in this file)
# to match the paths on your machine.
# These links will be active only if the @<file> node exists.
# Absolute file names
# Exists, non-empty.
C:/Repos/leo-editor/leo/plugins/importers/__init__.py:1:0:flake8 error
# Exists, empty.
C:/Repos/leo-editor/leo/plugins/leo_babel/__init__.py:1:0:flake8 error
# Large.
# C:/Repos/leo-editor/leo/core/leoGlobals.py:100:0:flake8 error
h = 'Error messages (copy to log)'
p = g.findNodeAnywhere(c, h)
if p:
    # Keep only the non-comment lines, then put them in the log as clickable links.
    lines = [z for z in g.splitLines(p.b) if not z.strip().startswith('#')]
    c.frame.log.put_html_links(''.join(lines))
else:
    print('Not found:', h)
parse-body
diff-marked-nodes
refresh-from-disk
convert-unls
copy-test
https://github.com/leo-editor/leo-editor/pull/3215/files
# Exists: Recent tests.
unl:gnx://#ekr.20180311131424.1
# Error messages (copy to log)
unl:gnx://#ekr.20230622112649.1
# Bad gnx
unl:gnx://xyzzy.leo#.20230622112649.1
# In LeoDocs.leo: will work only if LeoDocs.leo is open!
unl:gnx://#ekr.20100805171546.4412
# Shorter UNL:
unl://#Coloring tests-->Syntax coloring template
# Exists, non-empty.
unl://C:/Repos/leo-editor/leo/test/test.leo#@file ../plugins/importers/__init__.py
unl://#@file ../plugins/importers/__init__.py
# Exists, empty.
unl://C:/Repos/leo-editor/leo/test/test.leo#@clean ../plugins/leo_babel/__init__.py
unl://#@clean ../plugins/leo_babel/__init__.py
# Exists
unl://C:/Repos/leo-editor/leo/test/test.leo#Viewrendered examples
unl://#Viewrendered examples
unl://C:/Repos/leo-editor/leo/test/test.leo#Viewrendered examples-->Python code
unl://#Viewrendered examples-->Python code
# Exists: Recent tests
unl:gnx://test.leo#ekr.20180311131424.1
# Error messages (copy to log)
unl:gnx://test.leo#ekr.20230622112649.1
# The following links depend on @data unl-path-prefixes.
# These links should open LeoDocs.leo if it is not already open.
# In LeoDocs.leo: Leo 6.7.3 release notes
unl:gnx://LeoDocs.leo#ekr.20230409052507.1
# In LeoDocs.leo: ** Read me first **
unl:gnx://LeoDocs.leo#ekr.20050831195449
# Bad gnx: xyzzy.leo does not exist.
unl:gnx://xyzzy.leo#.20230622112649.1
# The following links depend on @data unl-path-prefixes.
unl://LeoDocs.leo#Release Notes-->Leo 6.7.3 release notes
# In LeoDocs.leo
unl://LeoDocs.leo#Web pages
# In LeoDocs.leo. Will work only if LeoDocs.leo is open!
unl://#Web pages
# These UNLs will work only on EKR's machine.
# They are a test of absolute matches in g.openUNLFile.
# Exists: Recent tests
unl:gnx://c:\Repos\leo-editor\leo\test\test.leo#ekr.20180311131424.1
# Error messages (copy to log)
unl:gnx://c:\Repos\leo-editor\leo\test\test.leo#ekr.20230622112649.1
# Should fail
unl:gnx://c:\Repos\leo-editor\leo\doc\test.leo#ekr.20230622112649.1
# The following links should open LeoDocs.leo if it is not already open.
# In LeoDocs.leo: Leo 6.7.3 release notes
unl:gnx://c:\Repos\leo-editor\leo\doc\LeoDocs.leo#ekr.20230409052507.1
# In LeoDocs.leo: ** Read me first **
unl:gnx://c:\Repos\leo-editor\leo\doc\LeoDocs.leo#ekr.20050831195449
# Bad gnx: xyzzy.leo does not exist.
unl:gnx://xyzzy.leo#.20230622112649.1
# Exists: Recent tests
unl:gnx://#ekr.20180311131424.1
# Error messages (copy to log)
unl:gnx://#ekr.20230622112649.1
# The following will work only if LeoDocs.leo is open!
# In LeoDocs.leo: Leo 6.7.3 release notes
unl:gnx://#ekr.20230409052507.1
# In LeoDocs.leo: ** Read me first **
unl:gnx://#ekr.20050831195449
# These data work on EKR's machine. Modify them for your own!
# Lines have the form:
#   x.leo: <absolute path of the directory containing x.leo>
test.leo: c:/Repos/leo-editor/leo/test
LeoDocs.leo: c:/Repos/leo-editor/leo/doc
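# For example, given the LeoDocs.leo prefix above, a file-relative UNL such as
#   unl:gnx://LeoDocs.leo#ekr.20230409052507.1
# resolves against c:/Repos/leo-editor/leo/doc/LeoDocs.leo.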
# In LeoDocs.leo: Leo 6.7.3 release notes
unl:gnx://../doc/LeoDocs.leo#ekr.20230409052507.1
# In LeoDocs.leo: ** Leo's Documentation **
unl:gnx://../doc/LeoDocs.leo#ekr.20040414161647
# Just open LeoDocs.leo.
unl:gnx://../doc/LeoDocs.leo#
@language python
Callout('Hello. This tutorial introduces Leo')
# Callout("First, I'll increase the text size for easier viewing")
# demo.next(wait=1.0)
# Create, move, promote, demote, hoist.
demo.retain(Title('Leo is a full featured outliner.'))
demo.wait(1.0)
###
demo.insert_node('a new node', keys=True, speed=10.0)
###
c.moveOutlineRight()
###
demo.end() # Test of early exit.
###
demo.insert_node('another headline')
###
demo.insert_node('yet another node')
###
p = g.findNodeInTree(c, demo.root, 'a new node')
assert p, 'a new node'
c.selectPosition(p)
demo.wait(0.25)
###
c.demote()
demo.wait(1.0)
###
demo.delete_retained_widgets()
# The body pane shows the text of the selected outline node.
# Section refs, @others, @file
# Callout('Thanks for watching')
###
# demo.next()
@language rest
@wrap
@language python
if c.isChanged(): c.save()
<< imports >>
<< class IntroSlides >>
<< main >>
# Do NOT use @others here.
try:
    # The *same* command/key binding calls both demo-start and demo.next.
    if getattr(g.app, 'demo', None):
        g.app.demo.next()
    else:
        demo = IntroSlides(c)
        main(c, demo,
            auto_run=False,
            hoist_node="Leo's Main Window",
            script_name='intro-slides-script',
        )
except Exception:
    # Exceptions can happen during startup.
    g.app.demo = None
    raise
from leo.core.leoQt import QtGui
import leo.plugins.demo as demo_module
# import imp
# imp.reload(demo_module)

class IntroSlides(demo_module.Demo):
    @others
def setup(self, p=None):
    c = self.c
    self.end_on_exception = True  # Good for debugging.
    self.delta = 0
    self.set_text_delta(self.delta)
    # self.set_youtube_position()
    if getattr(self, 'hoist_node', None):
        c.selectPosition(self.hoist_node)
        c.hoist()
        c.redraw()
def setup_script(self):
    self.delete_widgets()

def teardown(self):
    c = self.c
    self.delete_all_widgets()
    if self.delta > 0:
        self.set_text_delta(-self.delta)
    if self.hoist_node:
        c.selectPosition(self.hoist_node)
        c.dehoist()
        c.redraw()

def teardown_script(self):
    if self.auto_run:
        self.wait(0.5)  # Default wait.
def main(c, demo, script_name, auto_run=False, hoist_node=None):
    g.cls()
    k = c.k
    class_name = demo.__class__.__name__
    c.frame.log.clearTab('Log')
    g.es_print('Starting', class_name)
    # Tell k.masterKeyHandler to process Ctrl-9 immediately.
    # Binding demo-next in a setting does *not* work.
    k.demoNextKey = k.strokeFromSetting('Ctrl-9')
    h = '@button %s @key=Ctrl-9' % class_name
    p = g.findNodeAnywhere(c, h)
    assert p, h
    script_tree = g.findNodeInTree(c, p, script_name)
    assert script_tree, repr(script_name)
    demo.hoist_node = hoist_node and g.findNodeInTree(c, p, hoist_node)
    demo.start(script_tree, auto_run=auto_run)
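# Usage sketch: the first Ctrl-9 press runs the @button script, which calls
# main() and starts the demo; because k.demoNextKey is bound above, every
# later Ctrl-9 press calls g.app.demo.next() instead.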
@language python
Callout("This is Leo's main window")
###
demo.next()
if c.isChanged(): c.save()
<< imports >>
<< class MyDemo >>
# Don't use @others here.
# The *same* command/key binding calls both demo-start and demo.next.
try:
    if getattr(g.app, 'demo', None):
        g.app.demo.next()
    else:
        g.cls()
        c.frame.log.clearTab('Log')
        g.es_print('Starting MyDemo')
        # Tell k.masterKeyHandler to process Ctrl-9 immediately.
        # Binding demo-next in a setting does *not* work.
        c.k.demoNextKey = c.k.strokeFromSetting('Ctrl-9')
        demo = MyDemo(c)
        p = g.findNodeAnywhere(c, '@button Demo @key=Ctrl-9')
        script_tree = g.findNodeInTree(c, p, 'demo-script')
        demo.start(script_tree, auto_run=True)
except Exception:
    g.app.demo = None
    raise
command = r'python c:\Python\Python3.13\Lib\turtledemo\penrose.py'
g.execute_shell_commands(command)
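# The absolute path above is machine-specific. Assuming the stdlib turtledemo
# package ships with your Python, the same demo can be launched portably with:
#   command = 'python -m turtledemo.penrose'
#   g.execute_shell_commands(command)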
@language html
@language md
<h2>Feed-forward Network Functions</h2>
We will consider a basic two-layer neural network model, i.e., a model that maps inputs to a hidden layer and then to an output layer. We will make **the following assumptions**:
1. The final output will be a vector $Y$ with $K$ elements, $y_k$, where $y_k(\mathbf{x},\mathbf{w}) = p(C_1|\mathbf{x})$ is the probability that node $k$ is in class $C_1$ and $p(C_2|\mathbf{x}) = 1-p(C_1|\mathbf{x})$
2. The activation function at a given layer is an arbitrary nonlinear function of a linear combination of the inputs and parameters for that layer
3. The network is fully connected, i.e. every node at the input layer is connected to every node in the hidden layer and every node in the hidden layer is connected to every node in the output layer
4. A bias parameter is included at the hidden and output layers
Working from the input layer toward the output layer, we can build this model as follows:
<h3>Input Layer</h3>
Assume we have an input vector $\mathbf{x} \in \Re^D$. Then the input layer consists of $D+1$ nodes, where the value of the $i^{th}$ node, for $i=0\ldots D$, is $1$ if $i=0$ (the bias node) and $x_i$, i.e. the $i^{th}$ value of $\mathbf{x}$, otherwise.
<h3>Hidden Layer</h3>
At the hidden layer we construct $M$ nodes where the value of $M$ depends on the specifics of the particular modeling problem. For each node, we define a *unit activation*, $a_m$, for $m=1\ldots M$ as <br/>
$a_m = \sum_{i=0}^D w_{mi}^{(1)}x_i$ <br/>
where the $(1)$ superscript indicates this weight is for the hidden layer. The output from each node, $z_m$, is then given by the value of a *fixed nonlinear function*, $h$, known as the *activation function*, acting on the unit activation<br/>
$z_m = h(a_m) = h \left( \sum_{i=0}^D w_{mi}^{(1)}x_i \right)$<br/>
Notice that $h$ is the same function for all nodes.
<h3>Output Layer</h3>
The process at the output layer is essentially the same as at the hidden layer. We construct $K$ nodes, where again the value of $K$ depends on the specific modeling problem. For each node, we again define a *unit activation*, $a_k$, for $k=1 \ldots K$ by<br/>
$a_k = \sum_{m=0}^M w_{km}^{(2)} z_m$ <br/>
We again apply a nonlinear activation function, say $y$, to produce the output<br/>
$y_k = y(a_k)$
Thus, the entire model can be summarized as a $K$-dimensional output vector $Y \in \Re^K$ where each element $y_k$ is given by<br/>
$y_k(\mathbf{x},\mathbf{w}) = y \left( \sum_{m=0}^M w_{km}^{(2)} h \left( \sum_{i=0}^D w_{mi}^{(1)}x_i \right) \right)$
<h3>Generalizations</h3>
There are a wide variety of generalizations possible for this model. Some of the more important ones for practical applications include
* Addition of hidden layers
* Inclusion of *skip-layer* connections, e.g. a connection from an input node directly to an output node
* Sparse network, i.e. not a fully connected network
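The model above is easy to sanity-check numerically. Below is a minimal NumPy sketch (hypothetical shapes; `tanh` and the identity stand in for the unspecified activations $h$ and $y$, with the bias nodes prepended as constant 1s):

import numpy as np

def feed_forward(x, W1, W2):
    """Compute y_k(x, w) = y( sum_m w2_km * h( sum_i w1_mi * x_i ) )."""
    x = np.concatenate(([1.0], x))    # bias node x_0 = 1
    z = np.tanh(W1 @ x)               # hidden outputs z_m = h(a_m)
    z = np.concatenate(([1.0], z))    # hidden bias z_0 = 1
    return W2 @ z                     # outputs y_k = y(a_k), with y = identity

D, M, K = 3, 4, 2                     # input, hidden, and output sizes
rng = np.random.default_rng(0)
W1 = rng.standard_normal((M, D + 1))  # hidden-layer weights w_mi^(1)
W2 = rng.standard_normal((K, M + 1))  # output-layer weights w_km^(2)
print(feed_forward(rng.standard_normal(D), W1, W2))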
<!DOCTYPE html>
<html>
<head>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']]}});
</script>
<script type="text/javascript" async
src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS_CHTML">
</script>
</head>
<body>
<p><mathjax>
<h2>Feed-forward Network Functions</h2>
We will consider a basic two-layer neural network model, i.e., a model that maps inputs to a hidden layer and then to an output layer. We will make <b>the following assumptions</b>:
1. The final output will be a vector $Y$ with $K$ elements, $y_k$, where $y_k(\mathbf{x},\mathbf{w}) = p(C_1|\mathbf{x})$ is the probability that node $k$ is in class $C_1$ and $p(C_2|\mathbf{x}) = 1-p(C_1|\mathbf{x})$
2. The activation function at a given layer is an arbitrary nonlinear function of a linear combination of the inputs and parameters for that layer
3. The network is fully connected, i.e. every node at the input layer is connected to every node in the hidden layer and every node in the hidden layer is connected to every node in the output layer
4. A bias parameter is included at the hidden and output layers
Working from the input layer toward the output layer, we can build this model as follows:
<h3>Input Layer</h3>
Assume we have an input vector $\mathbf{x} \in \Re^D$. Then the input layer consists of $D+1$ nodes, where the value of the $i^{th}$ node, for $i=0\ldots D$, is $1$ if $i=0$ (the bias node) and $x_i$, i.e. the $i^{th}$ value of $\mathbf{x}$, otherwise.
<h3>Hidden Layer</h3>
At the hidden layer we construct $M$ nodes where the value of $M$ depends on the specifics of the particular modeling problem. For each node, we define a *unit activation*, $a_m$, for $m=1\ldots M$ as <br/>
$a_m = \sum_{i=0}^D w_{mi}^{(1)}x_i$ <br/>
where the $(1)$ superscript indicates this weight is for the hidden layer. The output from each node, $z_m$, is then given by the value of a *fixed nonlinear function*, $h$, known as the *activation function*, acting on the unit activation<br/>
$z_m = h(a_m) = h \left( \sum_{i=0}^D w_{mi}^{(1)}x_i \right)$<br/>
Notice that $h$ is the same function for all nodes.
<h3>Output Layer</h3>
The process at the output layer is essentially the same as at the hidden layer. We construct $K$ nodes, where again the value of $K$ depends on the specific modeling problem. For each node, we again define a *unit activation*, $a_k$, for $k=1 \ldots K$ by<br/>
$a_k = \sum_{m=0}^M w_{km}^{(2)} z_m$ <br/>
We again apply a nonlinear activation function, say $y$, to produce the output<br/>
$y_k = y(a_k)$
Thus, the entire model can be summarized as a $K$-dimensional output vector $Y \in \Re^K$ where each element $y_k$ is given by<br/>
$y_k(\mathbf{x},\mathbf{w}) = y \left( \sum_{m=0}^M w_{km}^{(2)} h \left( \sum_{i=0}^D w_{mi}^{(1)}x_i \right) \right)$
<h3>Generalizations</h3>
There are a wide variety of generalizations possible for this model. Some of the more important ones for practical applications include
* Addition of hidden layers
* Inclusion of *skip-layer* connections, e.g. a connection from an input node directly to an output node
* Sparse network, i.e. not a fully connected network
<!-- $$ -->
</mathjax></p>
</body></html>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/">
<title>Formula</title>
<script type="text/javascript" src="./vr3-mathjax-sample_files/tex-mml-chtml.js.download"></script><style type="text/css">.CtxtMenu_InfoClose { top:.2em; right:.2em;}
.CtxtMenu_InfoContent { overflow:auto; text-align:left; font-size:80%; padding:.4em .6em; border:1px inset; margin:1em 0px; max-height:20em; max-width:30em; background-color:#EEEEEE; white-space:normal;}
.CtxtMenu_Info.CtxtMenu_MousePost {outline:none;}
.CtxtMenu_Info { position:fixed; left:50%; width:auto; text-align:center; border:3px outset; padding:1em 2em; background-color:#DDDDDD; color:black; cursor:default; font-family:message-box; font-size:120%; font-style:normal; text-indent:0; text-transform:none; line-height:normal; letter-spacing:normal; word-spacing:normal; word-wrap:normal; white-space:nowrap; float:none; z-index:201; border-radius: 15px; /* Opera 10.5 and IE9 */ -webkit-border-radius:15px; /* Safari and Chrome */ -moz-border-radius:15px; /* Firefox */ -khtml-border-radius:15px; /* Konqueror */ box-shadow:0px 10px 20px #808080; /* Opera 10.5 and IE9 */ -webkit-box-shadow:0px 10px 20px #808080; /* Safari 3 & Chrome */ -moz-box-shadow:0px 10px 20px #808080; /* Forefox 3.5 */ -khtml-box-shadow:0px 10px 20px #808080; /* Konqueror */ filter:progid:DXImageTransform.Microsoft.dropshadow(OffX=2, OffY=2, Color="gray", Positive="true"); /* IE */}
</style><style type="text/css">.CtxtMenu_MenuClose { position:absolute; cursor:pointer; display:inline-block; border:2px solid #AAA; border-radius:18px; -webkit-border-radius: 18px; /* Safari and Chrome */ -moz-border-radius: 18px; /* Firefox */ -khtml-border-radius: 18px; /* Konqueror */ font-family: "Courier New", Courier; font-size:24px; color:#F0F0F0}
.CtxtMenu_MenuClose span { display:block; background-color:#AAA; border:1.5px solid; border-radius:18px; -webkit-border-radius: 18px; /* Safari and Chrome */ -moz-border-radius: 18px; /* Firefox */ -khtml-border-radius: 18px; /* Konqueror */ line-height:0; padding:8px 0 6px /* may need to be browser-specific */}
.CtxtMenu_MenuClose:hover { color:white!important; border:2px solid #CCC!important}
.CtxtMenu_MenuClose:hover span { background-color:#CCC!important}
.CtxtMenu_MenuClose:hover:focus { outline:none}
</style><style type="text/css">.CtxtMenu_Menu { position:absolute; background-color:white; color:black; width:auto; padding:5px 0px; border:1px solid #CCCCCC; margin:0; cursor:default; font: menu; text-align:left; text-indent:0; text-transform:none; line-height:normal; letter-spacing:normal; word-spacing:normal; word-wrap:normal; white-space:nowrap; float:none; z-index:201; border-radius: 5px; /* Opera 10.5 and IE9 */ -webkit-border-radius: 5px; /* Safari and Chrome */ -moz-border-radius: 5px; /* Firefox */ -khtml-border-radius: 5px; /* Konqueror */ box-shadow:0px 10px 20px #808080; /* Opera 10.5 and IE9 */ -webkit-box-shadow:0px 10px 20px #808080; /* Safari 3 & Chrome */ -moz-box-shadow:0px 10px 20px #808080; /* Forefox 3.5 */ -khtml-box-shadow:0px 10px 20px #808080; /* Konqueror */}
.CtxtMenu_MenuItem { padding: 1px 2em; background:transparent;}
.CtxtMenu_MenuArrow { position:absolute; right:.5em; padding-top:.25em; color:#666666; font-family: null; font-size: .75em}
.CtxtMenu_MenuActive .CtxtMenu_MenuArrow {color:white}
.CtxtMenu_MenuArrow.CtxtMenu_RTL {left:.5em; right:auto}
.CtxtMenu_MenuCheck { position:absolute; left:.7em; font-family: null}
.CtxtMenu_MenuCheck.CtxtMenu_RTL { right:.7em; left:auto }
.CtxtMenu_MenuRadioCheck { position:absolute; left: .7em;}
.CtxtMenu_MenuRadioCheck.CtxtMenu_RTL { right: .7em; left:auto}
.CtxtMenu_MenuInputBox { padding-left: 1em; right:.5em; color:#666666; font-family: null;}
.CtxtMenu_MenuInputBox.CtxtMenu_RTL { left: .1em;}
.CtxtMenu_MenuComboBox { left:.1em; padding-bottom:.5em;}
.CtxtMenu_MenuSlider { left: .1em;}
.CtxtMenu_SliderValue { position:absolute; right:.1em; padding-top:.25em; color:#333333; font-size: .75em}
.CtxtMenu_SliderBar { outline: none; background: #d3d3d3}
.CtxtMenu_MenuLabel { padding: 1px 2em 3px 1.33em; font-style:italic}
.CtxtMenu_MenuRule { border-top: 1px solid #DDDDDD; margin: 4px 3px;}
.CtxtMenu_MenuDisabled { color:GrayText}
.CtxtMenu_MenuActive { background-color: #606872; color: white;}
.CtxtMenu_MenuDisabled:focus { background-color: #E8E8E8}
.CtxtMenu_MenuLabel:focus { background-color: #E8E8E8}
.CtxtMenu_ContextMenu:focus { outline:none}
.CtxtMenu_ContextMenu .CtxtMenu_MenuItem:focus { outline:none}
.CtxtMenu_SelectionMenu { position:relative; float:left; border-bottom: none; -webkit-box-shadow:none; -webkit-border-radius:0px; }
.CtxtMenu_SelectionItem { padding-right: 1em;}
.CtxtMenu_Selection { right: 40%; width:50%; }
.CtxtMenu_SelectionBox { padding: 0em; max-height:20em; max-width: none; background-color:#FFFFFF;}
.CtxtMenu_SelectionDivider { clear: both; border-top: 2px solid #000000;}
.CtxtMenu_Menu .CtxtMenu_MenuClose { top:-10px; left:-10px}
</style>
<style type="text/css">
/* +---------------+
 * | normalize.css |
 * +---------------+
 */
/**
 * Sources:
 * 1. Docutils solarized-dark theme adapted from
 *    https://github.com/fladd/docutils-solarized
 * 2. Pygments additions by T. B. Passin.
 */
/*! normalize.css v3.0.2 | MIT License | git.io/normalize */
/**
* 1. Set default font family to sans-serif.
* 2. Prevent iOS text size adjust after orientation change, without disabling
* user zoom.
*/
html {
font-family: sans-serif; /* 1 */
-ms-text-size-adjust: 100%; /* 2 */
-webkit-text-size-adjust: 100%; /* 2 */
}
/**
* Remove default margin.
*/
body {
margin: 0;
}
/* HTML5 display definitions
========================================================================== */
/**
* Correct `block` display not defined for any HTML5 element in IE 8/9.
* Correct `block` display not defined for `details` or `summary` in IE 10/11
* and Firefox.
* Correct `block` display not defined for `main` in IE 11.
*/
article,
aside,
details,
figcaption,
figure,
footer,
header,
hgroup,
main,
menu,
nav,
section,
summary {
display: block;
}
/**
* 1. Correct `inline-block` display not defined in IE 8/9.
* 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.
*/
audio,
canvas,
progress,
video {
display: inline-block; /* 1 */
vertical-align: baseline; /* 2 */
}
/**
* Prevent modern browsers from displaying `audio` without controls.
* Remove excess height in iOS 5 devices.
*/
audio:not([controls]) {
display: none;
height: 0;
}
/**
* Address `[hidden]` styling not present in IE 8/9/10.
* Hide the `template` element in IE 8/9/11, Safari, and Firefox < 22.
*/
[hidden],
template {
display: none;
}
/* Links
========================================================================== */
/**
* Remove the gray background color from active links in IE 10.
*/
a {
background-color: transparent;
}
/**
* Improve readability when focused and also mouse hovered in all browsers.
*/
a:active,
a:hover {
outline: 0;
}
/* Text-level semantics
========================================================================== */
/**
* Address styling not present in IE 8/9/10/11, Safari, and Chrome.
*/
abbr[title] {
border-bottom: 1px dotted;
}
/**
* Address style set to `bolder` in Firefox 4+, Safari, and Chrome.
*/
b,
strong {
font-weight: bold;
}
/**
* Address styling not present in Safari and Chrome.
*/
dfn {
font-style: italic;
}
/**
* Address variable `h1` font-size and margin within `section` and `article`
* contexts in Firefox 4+, Safari, and Chrome.
*/
h1 {
font-size: 2em;
margin: 0.67em 0;
}
/**
* Address styling not present in IE 8/9.
*/
mark {
background: #ff0;
color: #000;
}
/**
* Address inconsistent and variable font size in all browsers.
*/
small {
font-size: 80%;
}
/**
* Prevent `sub` and `sup` affecting `line-height` in all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sup {
top: -0.5em;
}
sub {
bottom: -0.25em;
}
/* Embedded content
========================================================================== */
/**
* Remove border when inside `a` element in IE 8/9/10.
*/
img {
border: 0;
}
/**
* Correct overflow not hidden in IE 9/10/11.
*/
svg:not(:root) {
overflow: hidden;
}
/* Grouping content
========================================================================== */
/**
* Address margin not present in IE 8/9 and Safari.
*/
figure {
margin: 1em 40px;
}
/**
* Address differences between Firefox and other browsers.
*/
hr {
-moz-box-sizing: content-box;
box-sizing: content-box;
height: 0;
}
/**
* Contain overflow in all browsers.
*/
pre {
overflow: auto;
}
/**
* Address odd `em`-unit font size rendering in all browsers.
*/
code,
kbd,
pre,
samp {
font-family: monospace, monospace;
font-size: 1em;
}
/* Forms
========================================================================== */
/**
* Known limitation: by default, Chrome and Safari on OS X allow very limited
* styling of `select`, unless a `border` property is set.
*/
/**
* 1. Correct color not being inherited.
* Known issue: affects color of disabled elements.
* 2. Correct font properties not being inherited.
* 3. Address margins set differently in Firefox 4+, Safari, and Chrome.
*/
button,
input,
optgroup,
select,
textarea {
color: inherit; /* 1 */
font: inherit; /* 2 */
margin: 0; /* 3 */
}
/**
* Address `overflow` set to `hidden` in IE 8/9/10/11.
*/
button {
overflow: visible;
}
/**
* Address inconsistent `text-transform` inheritance for `button` and `select`.
* All other form control elements do not inherit `text-transform` values.
* Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.
* Correct `select` style inheritance in Firefox.
*/
button,
select {
text-transform: none;
}
/**
* 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`
* and `video` controls.
* 2. Correct inability to style clickable `input` types in iOS.
* 3. Improve usability and consistency of cursor style between image-type
* `input` and others.
*/
button,
html input[type="button"], /* 1 */
input[type="reset"],
input[type="submit"] {
-webkit-appearance: button; /* 2 */
cursor: pointer; /* 3 */
}
/**
* Re-set default cursor for disabled elements.
*/
button[disabled],
html input[disabled] {
cursor: default;
}
/**
* Remove inner padding and border in Firefox 4+.
*/
button::-moz-focus-inner,
input::-moz-focus-inner {
border: 0;
padding: 0;
}
/**
* Address Firefox 4+ setting `line-height` on `input` using `!important` in
* the UA stylesheet.
*/
input {
line-height: normal;
}
/**
* It's recommended that you don't attempt to style these elements.
* Firefox's implementation doesn't respect box-sizing, padding, or width.
*
* 1. Address box sizing set to `content-box` in IE 8/9/10.
* 2. Remove excess padding in IE 8/9/10.
*/
input[type="checkbox"],
input[type="radio"] {
box-sizing: border-box; /* 1 */
padding: 0; /* 2 */
}
/**
* Fix the cursor style for Chrome's increment/decrement buttons. For certain
* `font-size` values of the `input`, it causes the cursor style of the
* decrement button to change from `default` to `text`.
*/
input[type="number"]::-webkit-inner-spin-button,
input[type="number"]::-webkit-outer-spin-button {
height: auto;
}
/**
* 1. Address `appearance` set to `searchfield` in Safari and Chrome.
* 2. Address `box-sizing` set to `border-box` in Safari and Chrome
* (include `-moz` to future-proof).
*/
input[type="search"] {
-webkit-appearance: textfield; /* 1 */
-moz-box-sizing: content-box;
-webkit-box-sizing: content-box; /* 2 */
box-sizing: content-box;
}
/**
* Remove inner padding and search cancel button in Safari and Chrome on OS X.
* Safari (but not Chrome) clips the cancel button when the search input has
* padding (and `textfield` appearance).
*/
input[type="search"]::-webkit-search-cancel-button,
input[type="search"]::-webkit-search-decoration {
-webkit-appearance: none;
}
/**
* Define consistent border, margin, and padding.
*/
fieldset {
border: 1px solid #c0c0c0;
margin: 0 2px;
padding: 0.35em 0.625em 0.75em;
}
/**
* 1. Correct `color` not being inherited in IE 8/9/10/11.
* 2. Remove padding so people aren't caught out if they zero out fieldsets.
*/
legend {
border: 0; /* 1 */
padding: 0; /* 2 */
}
/**
* Remove default vertical scrollbar in IE 8/9/10/11.
*/
textarea {
overflow: auto;
}
/**
* Don't inherit the `font-weight` (applied by a rule above).
* NOTE: the default cannot safely be changed in Chrome and Safari on OS X.
*/
optgroup {
font-weight: bold;
}
/* Tables
========================================================================== */
/**
* Remove most spacing between table cells.
*/
table {
border-collapse: collapse;
border-spacing: 0;
}
td,
th {
padding: 0;
}
/* +--------------------+
* | docutils-solarized |
* +--------------------+
*/
html {
transform: scale(.85);
transform-origin: 0 0;
}
/* Document Structure
* ==================
*/
/* Document */
body {
color: #839496;
background: #002b36;
/*font-family: Palatino, "Palatino LT STD", "Palatino Linotype", "Book Antiqua", Georgia, serif;*/
font-family: Verdana, Arial, "Bitstream Vera Sans", sans-serif;
padding: 0 5%;
margin: 8px 0;
}
div.document {
font-size: 1em;
line-height:1.5;
counter-reset: table;
max-width: 50em;
margin: auto;
}
.align-left { text-align: left; }
.align-right { text-align: right; }
.align-center {
clear: both;
text-align: center;
}
/* Sections */
h1.title, p.subtitle {
text-align: center;
}
h1, h2, h3, h4, h5, h6 {
margin-top: 1em;
margin-bottom: 0em;
}
h1.title {
font-size: 2em;
margin-top: 4rem;
}
h1 + p.subtitle {
font-size: 1.5em;
margin-top: 1em;
margin-bottom: 4rem;
}
h1 + p.section-subtitle { font-size: 1.6em; }
h2 + p.section-subtitle { font-size: 1.28em; }
a.toc-backref {
color: #839496;
text-decoration: none;
}
a.toc-backref:visited {
color: #839496;
text-decoration: none;
}
/* Stop floating sidebars, images and figures at section level 1,2,3 */
h1, h2, h3 { clear: both; }
/* Transitions */
hr.docutils {
width: 100%;
margin-top: 2em;
margin-bottom: 2em;
clear: both;
border: 1px solid;
}
/* Paragraphs
* ==========
*/
/* vertical space (parskip) */
p, ol, ul, dl,
div.line-block,
table, pre, .figure {
margin-top: 1em;
margin-bottom: 1em;
}
dl > dd {
margin-bottom: 1em;
}
/* titles */
p.admonition-title,
p.topic-title,
p.sidebar-title,
p.system-message-title {
color: #ededed;
font-weight: bold;
}
p.subtitle,
p.section-subtitle,
p.sidebar-subtitle {
font-weight: bold;
margin-top: -0.5em;
}
p.sidebar-subtitle {
color: #ededed;
}
/* Warnings, Errors */
div.admonition {
background-color: #073642;
/*color: #ededed;*/
}
div.danger p.admonition-title,
div.error p.admonition-title,
div.system-messages h1,
div.error,
span.problematic,
p.system-message-title {
color: #dc322f;
}
div.caution p.admonition-title,
div.attention p.admonition-title,
div.warning p.admonition-title {
color: #cb4b16;
}
div.hint p.admonition-title,
div.tip p.admonition-title {
color: #859900;
}
div.important p.admonition-title {
color: #d33682;
}
div.note p.admonition-title {
color: #2aa198;
}
/* Lists
 * =====
 */
/* compact and simple lists: no margin between items */
dl.simple > dd, dl.compact > dd,
.compact li, .compact ul, .compact ol,
.simple li, .simple ul, .simple ol,
.simple > li p, .compact > li p {
margin-top: 0;
margin-bottom: 0;
}
/* Enumerated Lists */
ol.arabic { list-style: decimal }
ol.loweralpha { list-style: lower-alpha }
ol.upperalpha { list-style: upper-alpha }
ol.lowerroman { list-style: lower-roman }
ol.upperroman { list-style: upper-roman }
/* Definition Lists */
dl > dd p:first-child { margin-top: 0; }
/* lists nested in definition lists */
dd > ul, dd > ol { padding-left: 0pt; }
dt span.classifier { font-style: italic }
dt span.classifier:before {
font-style: normal;
margin: 0.5em;
content: ":";
}
/* Field Lists */
/* bold field name, content starts on the same line */
dl.field-list > dt,
dl.option-list > dt,
dl.docinfo > dt,
dl.footnote > dt,
dl.citation > dt,
dl.description > dt {
font-weight: bold;
clear: left;
float: left;
margin: 0;
padding: 0;
padding-right: 0.5em;
}
/* Offset for field content (corresponds to the --field-name-limit option) */
dl.field-list > dd,
dl.option-list > dd,
dl.docinfo > dd {
margin-left: 9em; /* ca. 14 chars in the test examples */
}
/* start field-body on a new line after long field names */
dl.field-list > dd > *:first-child,
dl.option-list > dd > *:first-child,
dl.docinfo > dd > *:first-child {
display: inline-block;
width: 100%;
margin: 0;
}
/* field names followed by a colon */
dl.field-list > dt:after,
dl.docinfo > dt:after {
content: ":";
}
/* example for custom field-name width */
dl.field-list.narrow > dd {
margin-left: 5em;
}
/* run-in: start field-body on same line after long field names */
dl.field-list.run-in > dd p {
display: block;
}
/* Bibliographic Fields */
/* use special field-list dl.docinfo */
pre.address {
margin-bottom: 0;
margin-top: 0;
font: inherit;
}
dd.authors > p { margin: 0; }
div.abstract {
margin-top: 2em;
margin-bottom: 2em;
text-align: center;
}
div.dedication {
margin-top: 2em;
margin-bottom: 2em;
text-align: center;
font-style: italic;
}
div.dedication div.abstract p.topic-title {
font-style: normal;
}
/* Option Lists */
dl.option-list {
margin-left: 1em;
}
dl.option-list > dt {
font-weight: normal;
}
span.option {
color: #ededed;
background-color: #073642;
border: 1px solid #586e75;
white-space: nowrap;
}
/* Text Blocks
* ===========
*/
/* Line Blocks */
div.line-block {
display: block;
}
div.line-block div.line-block {
margin-top: 0;
margin-bottom: 0;
margin-left: 1.5em;
}
/* Literal Blocks */
pre.doctest-block,
pre.math, pre.code {
/*color: #93a1a1;*/
color: #ededed;
background: #073642;
font-family: Cousine, Courier, Droid Sans Mono, DejaVu Sans Mono, monospace;
border: 1px solid #586e75 !important;
padding: 1em;
border-radius: .7em;
}
pre.literal-block {
color: #eee8d5;
background: #586e75;
font-family: Cousine, Courier, Droid Sans Mono, DejaVu Sans Mono, monospace;
border: 1px solid #b0b0b0;
padding: 1em;
border-radius: .7em;
}
/* Block Quotes */
blockquote,
div.topic {
margin-left: 2em;
margin-right: 2em
}
blockquote > table,
div.topic > table {
margin-top: 0;
margin-bottom: 0;
}
blockquote p.attribution,
div.topic p.attribution {
text-align: right;
margin-left: 20%;
}
/* Tables
* ======
*/
/* margins and borders for "normal" tables */
table {
border-collapse: collapse;
border-top: 2px solid;
border-bottom: 2px solid;
}
td, th {
padding: 0.5ex 1ex;
text-align: left;
/* some borders missing at some magnifications
 * in Firefox 31.5.0 and Opera 10.63 */
border-width: 1px;
}
td > p:first-child, th > p:first-child {
margin-top: 0;
}
td > p, th > p {
margin-bottom: 0;
}
th {
background: #586e75;
color: #eee8d5;
vertical-align: top;
border-bottom: thin solid #839496;
text-align: center;
}
th.docinfo-name {
text-align: right;
}
table > caption {
text-align: left;
}
table.borderless td, table.borderless th {
border: 0;
padding: 0;
padding-right: 0.5em /* separate table cells */
}
/* "booktabs" style (no vertical lines) */
table.booktabs {
border: 0;
border-top: 2px solid;
border-bottom: 2px solid;
border-collapse: collapse;
}
table.booktabs * {
border: 0;
}
table.booktabs th {
border-bottom: thin solid;
text-align: left;
}
/* numbered tables (counter defined in div.document) */
table.numbered > caption:before {
counter-increment: table;
content: "Table " counter(table) ": ";
font-weight: bold;
}
/* Explicit Markup Blocks
* ======================
*/
/* Footnotes and Citations
* -----------------------
*/
/* line on the left */
dl.footnote {
padding-left: 1ex;
border-left: solid;
border-left-width: 2px;
border-color: #839496;
}
dl > dt.label {
font-weight: normal;
}
dt.label > span.fn-backref {
margin: 0.2em;
}
dt.label > span.fn-backref > a {
font-style: italic;
}
/* Directives
* ----------
*/
/* Admonitions */
/* System Messages */
div.admonition,
div.system-message {
margin: 2em;
border: 2px solid;
padding-right: 1em;
padding-left: 1em;
background-color: #073642;
/*color: #ededed;*/
border-color: #839496;
}
/* Body Elements
* -------------
*/
/* Image and Figure */
img {
vertical-align: bottom;
}
div.figure {
margin-left: 2em;
margin-right: 2em;
}
img.align-left,
.figure.align-left,
object.align-left {
clear: left;
float: left;
margin-right: 1em
}
img.align-right,
.figure.align-right,
object.align-right {
clear: right;
float: right;
margin-left: 1em
}
img.align-center,
.figure.align-center,
object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
/* reset inner alignment in figures */
div.align-right {
text-align: inherit }
p.caption {
font-style: italic;
}
object[type="image/svg+xml"],
object[type="application/x-shockwave-flash"] {
overflow: hidden;
}
/* Topic */
#table-of-contents {
margin-top: 2em;
margin-bottom: 2em;
}
/* Sidebar */
/* in a layout with fixed margins, */
/* the sidebar can be moved into the margin completely */
div.sidebar {
border: 2px solid;
border-color: #839496;
padding-right: 1em;
padding-left: 1em;
width: 36%;
max-width: 26em;
float: right;
clear: right;
margin-left: 1em;
margin-top: 2em;
margin-bottom: 2em;
background-color: #073642;
color: #ededed;
}
p.sidebar-title { font-size: larger; }
/* Code */
pre.code, code {
color: #ededed;
background-color: #073642
}
pre.code .ln {color: #586e75}
pre.code .err { color: #dc322f } /* Error */
pre.code .keyword { color: #859900 } /* Keyword */
pre.code .keyword.constant { color: #859900 } /* Keyword.Constant */
pre.code .keyword.declaration { color: #859900 } /* Keyword.Declaration */
pre.code .keyword.namespace { color: #cb4b16 } /* Keyword.Namespace */
pre.code .keyword.pseudo { color: #cb4b16 } /* Keyword.Pseudo */
pre.code .keyword.reserved { color: #859900 } /* Keyword.Reserved */
pre.code .keyword.type { color: #859900 } /* Keyword.Type */
pre.code .name { color: #ededed } /* Name */
pre.code .name.attribute { } /* Name.Attribute */
pre.code .name.builtin { color: #268bd2 } /* Name.Builtin */
pre.code .name.builtin.pseudo { color: #268bd2 } /* Name.Builtin.Pseudo */
pre.code .name.class { color: #268bd2 } /* Name.Class */
pre.code .name.constant { color: #b58900 } /* Name.Constant */
pre.code .name.decorator { color: #cb4b16 } /* Name.Decorator */
pre.code .name.entity { color: #cb4b16 } /* Name.Entity */
pre.code .name.exception { color: #b58900 } /* Name.Exception */
pre.code .name.function { color: #268bd2 } /* Name.Function */
pre.code .name.label { } /* Name.Label */
pre.code .name.namespace { } /* Name.Namespace */
pre.code .name.other { } /* Name.Other */
pre.code .name.property { color: #268bd2 } /* Name.Property */
pre.code .name.tag { color: #859900 } /* Name.Tag */
pre.code .name.variable { color: #cb4b16 } /* Name.Variable */
pre.code .name.variable.class { color: #268bd2 } /* Name.Variable.Class */
pre.code .name.variable.global { color: #268bd2 } /* Name.Variable.Global */
pre.code .name.variable.instance { color: #268bd2 } /* Name.Variable.Instance */
pre.code .literal { } /* Literal */
pre.code .literal.date { } /* Literal.Date */
pre.code .literal.string { color: #2aa198 } /* Literal.String */
pre.code .literal.string.backtick { color: #2aa198 } /* Literal.String.Backtick */
pre.code .literal.string.char { color: #2aa198 } /* Literal.String.Char */
pre.code .literal.string.doc { color: #2aa198 } /* Literal.String.Doc */
pre.code .literal.string.double { color: #2aa198 } /* Literal.String.Double */
pre.code .literal.string.escape { color: #cb4b16 } /* Literal.String.Escape */
pre.code .literal.string.heredoc { color: #2aa198 } /* Literal.String.Heredoc */
pre.code .literal.string.interpol { color: #cb4b16 } /* Literal.String.Interpol */
pre.code .literal.string.other { color: #2aa198 } /* Literal.String.Other */
pre.code .literal.string.regex { color: #2aa198 } /* Literal.String.Regex */
pre.code .literal.string.single { color: #2aa198 } /* Literal.String.Single */
pre.code .literal.string.symbol { color: #2aa198 } /* Literal.String.Symbol */
pre.code .literal.number { color: #2aa198 } /* Literal.Number */
pre.code .literal.number.bin { color: #2aa198 } /* Literal.Number.Bin */
pre.code .literal.number.float { color: #2aa198 } /* Literal.Number.Float */
pre.code .literal.number.hex { color: #2aa198 } /* Literal.Number.Hex */
pre.code .literal.number.integer { color: #2aa198 } /* Literal.Number.Integer */
pre.code .literal.number.integer.long { color: #2aa198 } /* Literal.Number.Integer.Long */
pre.code .literal.number.oct { color: #2aa198 } /* Literal.Number.Oct */
pre.code .operator { } /* Operator */
pre.code .operator.word { color: #859900 } /* Operator.Word */
pre.code .punctuation { } /* Punctuation */
pre.code .comment { color: #586e75; font-style: italic } /* Comment */
pre.code .comment.multiline { color: #586e75; font-style: italic } /* Comment.Multiline */
pre.code .comment.preproc { color: #586e75; font-style: italic } /* Comment.Preproc */
pre.code .comment.single { color: #586e75; font-style: italic } /* Comment.Single */
pre.code .comment.special { color: #586e75; font-style: italic } /* Comment.Special */
pre.code .generic { } /* Generic */
pre.code .other { } /* Other */
pre.code .generic.deleted { } /* Generic.Deleted */
pre.code .generic.emph { } /* Generic.Emph */
pre.code .generic.error { } /* Generic.Error */
pre.code .generic.heading { } /* Generic.Heading */
pre.code .generic.inserted { } /* Generic.Inserted */
pre.code .generic.output { } /* Generic.Output */
pre.code .generic.prompt { } /* Generic.Prompt */
pre.code .generic.strong { } /* Generic.Strong */
pre.code .generic.subheading { } /* Generic.Subheading */
pre.code .generic.traceback { } /* Generic.Traceback */
/* Math */
/* styled separately (see math.css for math-output=HTML) */
/* Rubric */
p.rubric {
font-weight: bold;
font-size: larger;
color: #b58900;
}
/* Epigraph */
/* Highlights */
/* Pull-Quote */
/* Compound Paragraph */
/* Container */
/* can be styled in a custom stylesheet */
/* Document Header & Footer */
div.footer, div.header {
clear: both;
}
hr.header {
border: 1px solid;
margin: 0;
padding: 0;
}
hr.footer {
border: 1px solid;
margin: 0;
padding: 0;
}
div.header {
margin-top: 1em;
margin-bottom: 4em;
}
div.footer {
margin-top: 4em;
margin-bottom: 1em;
}
/* Contents */
div.topic.contents {
margin: 0; /* don't indent like a topic */
}
ul.auto-toc {
list-style-type: none;
}
/* Inline Markup
* =============
*/
/* Emphasis */
/* em */
/* Strong Emphasis */
/* strong */
/* Interpreted Text */
/* span.interpreted */
/* Title Reference */
/* cite */
/* Inline Literals */
tt.literal, span.docutils.literal {
font-family: Courier, Droid Sans Mono, DejaVu Sans Mono, monospace;
font-size: large;
/*color: #ededed;
background-color: #073642;
border: 1px solid #586e75;*/
/* possible values: normal, nowrap, pre, pre-wrap, pre-line */
white-space: pre-wrap;
}
tt.docutils.literal {
font-family: Courier, Droid Sans Mono, DejaVu Sans Mono, monospace;
font-size: large;
/*color: #ededed;
background-color: #073642;*/
border: none; /*1px solid #586e75;*/
/* possible values: normal, nowrap, pre, pre-wrap, pre-line */
white-space: pre-wrap;
}
/* do not wrap hyphens and similar: */
.literal > span.pre { white-space: nowrap; }
/* Hyperlink References */
a {
text-decoration: none;
color: #268bd2;
}
a:hover {
text-decoration: underline;
}
a:visited {
color: #6c71c4;
}
/* External Targets */
/* span.target.external */
/* Internal Targets */
/* span.target.internal */
/* Footnote References */
/* a.footnote-reference */
/* Citation References */
/* a.citation-reference */
/* ============= Begin pygments-specific additions ===================== */
pre { line-height: 125%; }
td.linenos .normal { color: #586e75; background-color: #073642; padding-left: 5px; padding-right: 5px; }
span.linenos { color: #586e75; background-color: #073642; padding-left: 5px; padding-right: 5px; }
td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
.hll { background-color: #073642 }
.c { color: #586e75; font-style: italic } /* Comment */
.err { color: #839496; background-color: #dc322f } /* Error */
.esc { color: #839496 } /* Escape */
.g { color: #839496 } /* Generic */
.k { color: #859900 } /* Keyword */
.l { color: #839496 } /* Literal */
.n { color: #839496 } /* Name */
.o { color: #586e75 } /* Operator */
.x { color: #839496 } /* Other */
.p { color: #839496 } /* Punctuation */
.ch { color: #586e75; font-style: italic } /* Comment.Hashbang */
.cm { color: #586e75; font-style: italic } /* Comment.Multiline */
.cp { color: #d33682 } /* Comment.Preproc */
.cpf { color: #586e75 } /* Comment.PreprocFile */
.c1 { color: #586e75; font-style: italic } /* Comment.Single */
.cs { color: #586e75; font-style: italic } /* Comment.Special */
.gd { color: #dc322f } /* Generic.Deleted */
.ge { color: #839496; font-style: italic } /* Generic.Emph */
.gr { color: #dc322f } /* Generic.Error */
.gh { color: #839496; font-weight: bold } /* Generic.Heading */
.gi { color: #859900 } /* Generic.Inserted */
.go { color: #839496 } /* Generic.Output */
.gp { color: #268bd2; font-weight: bold } /* Generic.Prompt */
.gs { color: #839496; font-weight: bold } /* Generic.Strong */
.gu { color: #839496; text-decoration: underline } /* Generic.Subheading */
.gt { color: #268bd2 } /* Generic.Traceback */
.kc { color: #2aa198 } /* Keyword.Constant */
.kd { color: #2aa198 } /* Keyword.Declaration */
.kn { color: #cb4b16 } /* Keyword.Namespace */
.kp { color: #859900 } /* Keyword.Pseudo */
.kr { color: #859900 } /* Keyword.Reserved */
.kt { color: #b58900 } /* Keyword.Type */
.ld { color: #839496 } /* Literal.Date */
.m { color: #2aa198 } /* Literal.Number */
.s { color: #2aa198 } /* Literal.String */
.na { color: #839496 } /* Name.Attribute */
.nb { color: #268bd2 } /* Name.Builtin */
.nc { color: #268bd2 } /* Name.Class */
.no { color: #268bd2 } /* Name.Constant */
.nd { color: #268bd2 } /* Name.Decorator */
.ni { color: #268bd2 } /* Name.Entity */
.ne { color: #268bd2 } /* Name.Exception */
.nf { color: #268bd2 } /* Name.Function */
.nl { color: #268bd2 } /* Name.Label */
.nn { color: #268bd2 } /* Name.Namespace */
.nx { color: #839496 } /* Name.Other */
.py { color: #839496 } /* Name.Property */
.nt { color: #268bd2 } /* Name.Tag */
.nv { color: #268bd2 } /* Name.Variable */
.ow { color: #859900 } /* Operator.Word */
.w { color: #839496 } /* Text.Whitespace */
.mb { color: #2aa198 } /* Literal.Number.Bin */
.mf { color: #2aa198 } /* Literal.Number.Float */
.mh { color: #2aa198 } /* Literal.Number.Hex */
.mi { color: #2aa198 } /* Literal.Number.Integer */
.mo { color: #2aa198 } /* Literal.Number.Oct */
.sa { color: #2aa198 } /* Literal.String.Affix */
.sb { color: #2aa198 } /* Literal.String.Backtick */
.sc { color: #2aa198 } /* Literal.String.Char */
.dl { color: #2aa198 } /* Literal.String.Delimiter */
.sd { color: #586e75 } /* Literal.String.Doc */
.s2 { color: #2aa198 } /* Literal.String.Double */
.se { color: #2aa198 } /* Literal.String.Escape */
.sh { color: #2aa198 } /* Literal.String.Heredoc */
.si { color: #2aa198 } /* Literal.String.Interpol */
.sx { color: #2aa198 } /* Literal.String.Other */
.sr { color: #cb4b16 } /* Literal.String.Regex */
.s1 { color: #2aa198 } /* Literal.String.Single */
.ss { color: #2aa198 } /* Literal.String.Symbol */
.bp { color: #268bd2 } /* Name.Builtin.Pseudo */
.fm { color: #268bd2 } /* Name.Function.Magic */
.vc { color: #268bd2 } /* Name.Variable.Class */
.vg { color: #268bd2 } /* Name.Variable.Global */
.vi { color: #268bd2 } /* Name.Variable.Instance */
.vm { color: #268bd2 } /* Name.Variable.Magic */
.il { color: #2aa198 } /* Literal.Number.Integer.Long */
@media print {
footer {page-break-after: always;}
}
</style>
<style id="MJX-CHTML-styles">
mjx-container[jax="CHTML"] {
line-height: 0;
}
mjx-container [space="1"] {
margin-left: .111em;
}
mjx-container [space="2"] {
margin-left: .167em;
}
mjx-container [space="3"] {
margin-left: .222em;
}
mjx-container [space="4"] {
margin-left: .278em;
}
mjx-container [space="5"] {
margin-left: .333em;
}
mjx-container [rspace="1"] {
margin-right: .111em;
}
mjx-container [rspace="2"] {
margin-right: .167em;
}
mjx-container [rspace="3"] {
margin-right: .222em;
}
mjx-container [rspace="4"] {
margin-right: .278em;
}
mjx-container [rspace="5"] {
margin-right: .333em;
}
mjx-container [size="s"] {
font-size: 70.7%;
}
mjx-container [size="ss"] {
font-size: 50%;
}
mjx-container [size="Tn"] {
font-size: 60%;
}
mjx-container [size="sm"] {
font-size: 85%;
}
mjx-container [size="lg"] {
font-size: 120%;
}
mjx-container [size="Lg"] {
font-size: 144%;
}
mjx-container [size="LG"] {
font-size: 173%;
}
mjx-container [size="hg"] {
font-size: 207%;
}
mjx-container [size="HG"] {
font-size: 249%;
}
mjx-container [width="full"] {
width: 100%;
}
mjx-box {
display: inline-block;
}
mjx-block {
display: block;
}
mjx-itable {
display: inline-table;
}
mjx-row {
display: table-row;
}
mjx-row > * {
display: table-cell;
}
mjx-mtext {
display: inline-block;
}
mjx-mstyle {
display: inline-block;
}
mjx-merror {
display: inline-block;
color: red;
background-color: yellow;
}
mjx-mphantom {
visibility: hidden;
}
_::-webkit-full-page-media, _:future, :root mjx-container {
will-change: opacity;
}
mjx-assistive-mml {
position: absolute !important;
top: 0px;
left: 0px;
clip: rect(1px, 1px, 1px, 1px);
padding: 1px 0px 0px 0px !important;
border: 0px !important;
display: block !important;
width: auto !important;
overflow: hidden !important;
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
mjx-assistive-mml[display="block"] {
width: 100% !important;
}
mjx-math {
display: inline-block;
text-align: left;
line-height: 0;
text-indent: 0;
font-style: normal;
font-weight: normal;
font-size: 100%;
font-size-adjust: none;
letter-spacing: normal;
word-wrap: normal;
word-spacing: normal;
white-space: nowrap;
direction: ltr;
padding: 1px 0;
}
mjx-container[jax="CHTML"][display="true"] {
display: block;
text-align: center;
margin: 1em 0;
}
mjx-container[jax="CHTML"][display="true"][width="full"] {
display: flex;
}
mjx-container[jax="CHTML"][display="true"] mjx-math {
padding: 0;
}
mjx-container[jax="CHTML"][justify="left"] {
text-align: left;
}
mjx-container[jax="CHTML"][justify="right"] {
text-align: right;
}
mjx-mi {
display: inline-block;
text-align: left;
}
mjx-c {
display: inline-block;
}
mjx-utext {
display: inline-block;
padding: .75em 0 .2em 0;
}
mjx-mo {
display: inline-block;
text-align: left;
}
mjx-stretchy-h {
display: inline-table;
width: 100%;
}
mjx-stretchy-h > * {
display: table-cell;
width: 0;
}
mjx-stretchy-h > * > mjx-c {
display: inline-block;
transform: scalex(1.0000001);
}
mjx-stretchy-h > * > mjx-c::before {
display: inline-block;
width: initial;
}
mjx-stretchy-h > mjx-ext {
/* IE */ overflow: hidden;
/* others */ overflow: clip visible;
width: 100%;
}
mjx-stretchy-h > mjx-ext > mjx-c::before {
transform: scalex(500);
}
mjx-stretchy-h > mjx-ext > mjx-c {
width: 0;
}
mjx-stretchy-h > mjx-beg > mjx-c {
margin-right: -.1em;
}
mjx-stretchy-h > mjx-end > mjx-c {
margin-left: -.1em;
}
mjx-stretchy-v {
display: inline-block;
}
mjx-stretchy-v > * {
display: block;
}
mjx-stretchy-v > mjx-beg {
height: 0;
}
mjx-stretchy-v > mjx-end > mjx-c {
display: block;
}
mjx-stretchy-v > * > mjx-c {
transform: scaley(1.0000001);
transform-origin: left center;
overflow: hidden;
}
mjx-stretchy-v > mjx-ext {
display: block;
height: 100%;
box-sizing: border-box;
border: 0px solid transparent;
/* IE */ overflow: hidden;
/* others */ overflow: visible clip;
}
mjx-stretchy-v > mjx-ext > mjx-c::before {
width: initial;
box-sizing: border-box;
}
mjx-stretchy-v > mjx-ext > mjx-c {
transform: scaleY(500) translateY(.075em);
overflow: visible;
}
mjx-mark {
display: inline-block;
height: 0px;
}
mjx-mn {
display: inline-block;
text-align: left;
}
mjx-msup {
display: inline-block;
text-align: left;
}
mjx-mfrac {
display: inline-block;
text-align: left;
}
mjx-frac {
display: inline-block;
vertical-align: 0.17em;
padding: 0 .22em;
}
mjx-frac[type="d"] {
vertical-align: .04em;
}
mjx-frac[delims] {
padding: 0 .1em;
}
mjx-frac[atop] {
padding: 0 .12em;
}
mjx-frac[atop][delims] {
padding: 0;
}
mjx-dtable {
display: inline-table;
width: 100%;
}
mjx-dtable > * {
font-size: 2000%;
}
mjx-dbox {
display: block;
font-size: 5%;
}
mjx-num {
display: block;
text-align: center;
}
mjx-den {
display: block;
text-align: center;
}
mjx-mfrac[bevelled] > mjx-num {
display: inline-block;
}
mjx-mfrac[bevelled] > mjx-den {
display: inline-block;
}
mjx-den[align="right"], mjx-num[align="right"] {
text-align: right;
}
mjx-den[align="left"], mjx-num[align="left"] {
text-align: left;
}
mjx-nstrut {
display: inline-block;
height: .054em;
width: 0;
vertical-align: -.054em;
}
mjx-nstrut[type="d"] {
height: .217em;
vertical-align: -.217em;
}
mjx-dstrut {
display: inline-block;
height: .505em;
width: 0;
}
mjx-dstrut[type="d"] {
height: .726em;
}
mjx-line {
display: block;
box-sizing: border-box;
min-height: 1px;
height: .06em;
border-top: .06em solid;
margin: .06em -.1em;
overflow: hidden;
}
mjx-line[type="d"] {
margin: .18em -.1em;
}
mjx-mrow {
display: inline-block;
text-align: left;
}
mjx-msqrt {
display: inline-block;
text-align: left;
}
mjx-root {
display: inline-block;
white-space: nowrap;
}
mjx-surd {
display: inline-block;
vertical-align: top;
}
mjx-sqrt {
display: inline-block;
padding-top: .07em;
}
mjx-sqrt > mjx-box {
border-top: .07em solid;
}
mjx-sqrt.mjx-tall > mjx-box {
padding-left: .3em;
margin-left: -.3em;
}
mjx-munderover {
display: inline-block;
text-align: left;
}
mjx-munderover:not([limits="false"]) {
padding-top: .1em;
}
mjx-munderover:not([limits="false"]) > * {
display: block;
}
mjx-msubsup {
display: inline-block;
text-align: left;
}
mjx-script {
display: inline-block;
padding-right: .05em;
padding-left: .033em;
}
mjx-script > * {
display: block;
}
mjx-TeXAtom {
display: inline-block;
text-align: left;
}
mjx-msub {
display: inline-block;
text-align: left;
}
mjx-c::before {
display: block;
width: 0;
}
.MJX-TEX {
font-family: MJXZERO, MJXTEX;
}
.TEX-B {
font-family: MJXZERO, MJXTEX-B;
}
.TEX-I {
font-family: MJXZERO, MJXTEX-I;
}
.TEX-MI {
font-family: MJXZERO, MJXTEX-MI;
}
.TEX-BI {
font-family: MJXZERO, MJXTEX-BI;
}
.TEX-S1 {
font-family: MJXZERO, MJXTEX-S1;
}
.TEX-S2 {
font-family: MJXZERO, MJXTEX-S2;
}
.TEX-S3 {
font-family: MJXZERO, MJXTEX-S3;
}
.TEX-S4 {
font-family: MJXZERO, MJXTEX-S4;
}
.TEX-A {
font-family: MJXZERO, MJXTEX-A;
}
.TEX-C {
font-family: MJXZERO, MJXTEX-C;
}
.TEX-CB {
font-family: MJXZERO, MJXTEX-CB;
}
.TEX-FR {
font-family: MJXZERO, MJXTEX-FR;
}
.TEX-FRB {
font-family: MJXZERO, MJXTEX-FRB;
}
.TEX-SS {
font-family: MJXZERO, MJXTEX-SS;
}
.TEX-SSB {
font-family: MJXZERO, MJXTEX-SSB;
}
.TEX-SSI {
font-family: MJXZERO, MJXTEX-SSI;
}
.TEX-SC {
font-family: MJXZERO, MJXTEX-SC;
}
.TEX-T {
font-family: MJXZERO, MJXTEX-T;
}
.TEX-V {
font-family: MJXZERO, MJXTEX-V;
}
.TEX-VB {
font-family: MJXZERO, MJXTEX-VB;
}
mjx-stretchy-v mjx-c, mjx-stretchy-h mjx-c {
font-family: MJXZERO, MJXTEX-S1, MJXTEX-S4, MJXTEX, MJXTEX-A ! important;
}
@font-face /* 0 */ {
font-family: MJXZERO;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Zero.woff") format("woff");
}
@font-face /* 1 */ {
font-family: MJXTEX;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Main-Regular.woff") format("woff");
}
@font-face /* 2 */ {
font-family: MJXTEX-B;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Main-Bold.woff") format("woff");
}
@font-face /* 3 */ {
font-family: MJXTEX-I;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Math-Italic.woff") format("woff");
}
@font-face /* 4 */ {
font-family: MJXTEX-MI;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Main-Italic.woff") format("woff");
}
@font-face /* 5 */ {
font-family: MJXTEX-BI;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Math-BoldItalic.woff") format("woff");
}
@font-face /* 6 */ {
font-family: MJXTEX-S1;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Size1-Regular.woff") format("woff");
}
@font-face /* 7 */ {
font-family: MJXTEX-S2;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Size2-Regular.woff") format("woff");
}
@font-face /* 8 */ {
font-family: MJXTEX-S3;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Size3-Regular.woff") format("woff");
}
@font-face /* 9 */ {
font-family: MJXTEX-S4;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Size4-Regular.woff") format("woff");
}
@font-face /* 10 */ {
font-family: MJXTEX-A;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_AMS-Regular.woff") format("woff");
}
@font-face /* 11 */ {
font-family: MJXTEX-C;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Calligraphic-Regular.woff") format("woff");
}
@font-face /* 12 */ {
font-family: MJXTEX-CB;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Calligraphic-Bold.woff") format("woff");
}
@font-face /* 13 */ {
font-family: MJXTEX-FR;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Fraktur-Regular.woff") format("woff");
}
@font-face /* 14 */ {
font-family: MJXTEX-FRB;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Fraktur-Bold.woff") format("woff");
}
@font-face /* 15 */ {
font-family: MJXTEX-SS;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_SansSerif-Regular.woff") format("woff");
}
@font-face /* 16 */ {
font-family: MJXTEX-SSB;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_SansSerif-Bold.woff") format("woff");
}
@font-face /* 17 */ {
font-family: MJXTEX-SSI;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_SansSerif-Italic.woff") format("woff");
}
@font-face /* 18 */ {
font-family: MJXTEX-SC;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Script-Regular.woff") format("woff");
}
@font-face /* 19 */ {
font-family: MJXTEX-T;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Typewriter-Regular.woff") format("woff");
}
@font-face /* 20 */ {
font-family: MJXTEX-V;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Vector-Regular.woff") format("woff");
}
@font-face /* 21 */ {
font-family: MJXTEX-VB;
src: url("https://cdn.jsdelivr.net/npm/mathjax@3/es5/output/chtml/fonts/woff-v2/MathJax_Vector-Bold.woff") format("woff");
}
mjx-c.mjx-c1D44E.TEX-I::before {
padding: 0.441em 0.529em 0.01em 0;
content: "a";
}
mjx-c.mjx-c2212::before {
padding: 0.583em 0.778em 0.082em 0;
content: "\2212";
}
mjx-c.mjx-c32::before {
padding: 0.666em 0.5em 0 0;
content: "2";
}
mjx-c.mjx-c1D44F.TEX-I::before {
padding: 0.694em 0.429em 0.011em 0;
content: "b";
}
mjx-c.mjx-cB1::before {
padding: 0.666em 0.778em 0 0;
content: "\B1";
}
mjx-c.mjx-c221A::before {
padding: 0.8em 0.853em 0.2em 0;
content: "\221A";
}
mjx-c.mjx-c37::before {
padding: 0.676em 0.5em 0.022em 0;
content: "7";
}
mjx-c.mjx-c33::before {
padding: 0.665em 0.5em 0.022em 0;
content: "3";
}
mjx-c.mjx-c221E::before {
padding: 0.442em 1em 0.011em 0;
content: "\221E";
}
mjx-c.mjx-c2211.TEX-S2::before {
padding: 0.95em 1.444em 0.45em 0;
content: "\2211";
}
mjx-c.mjx-c1D45B.TEX-I::before {
padding: 0.442em 0.6em 0.011em 0;
content: "n";
}
mjx-c.mjx-c3D::before {
padding: 0.583em 0.778em 0.082em 0;
content: "=";
}
mjx-c.mjx-c31::before {
padding: 0.666em 0.5em 0 0;
content: "1";
}
mjx-c.mjx-c1D452.TEX-I::before {
padding: 0.442em 0.466em 0.011em 0;
content: "e";
}
mjx-c.mjx-c1D456.TEX-I::before {
padding: 0.661em 0.345em 0.011em 0;
content: "i";
}
mjx-c.mjx-c1D70B.TEX-I::before {
padding: 0.431em 0.57em 0.011em 0;
content: "\3C0";
}
mjx-c.mjx-c2B::before {
padding: 0.583em 0.778em 0.082em 0;
content: "+";
}
mjx-c.mjx-c28::before {
padding: 0.75em 0.389em 0.25em 0;
content: "(";
}
mjx-c.mjx-c35::before {
padding: 0.666em 0.5em 0.022em 0;
content: "5";
}
mjx-c.mjx-c1D465.TEX-I::before {
padding: 0.442em 0.572em 0.011em 0;
content: "x";
}
mjx-c.mjx-c34::before {
padding: 0.677em 0.5em 0 0;
content: "4";
}
mjx-c.mjx-c29::before {
padding: 0.75em 0.389em 0.25em 0;
content: ")";
}
mjx-c.mjx-c1D6FC.TEX-I::before {
padding: 0.442em 0.64em 0.011em 0;
content: "\3B1";
}
mjx-c.mjx-c1D461.TEX-I::before {
padding: 0.626em 0.361em 0.011em 0;
content: "t";
}
mjx-c.mjx-c1D443.TEX-I::before {
padding: 0.683em 0.751em 0 0;
content: "P";
}
mjx-c.mjx-c1D442.TEX-I::before {
padding: 0.704em 0.763em 0.022em 0;
content: "O";
}
mjx-c.mjx-c2C::before {
padding: 0.121em 0.278em 0.194em 0;
content: ",";
}
mjx-c.mjx-c2026::before {
padding: 0.12em 1.172em 0 0;
content: "\2026";
}
mjx-c.mjx-c1D45E.TEX-I::before {
padding: 0.442em 0.46em 0.194em 0;
content: "q";
}
mjx-c.mjx-c1D446.TEX-I::before {
padding: 0.705em 0.645em 0.022em 0;
content: "S";
}
mjx-c.mjx-c1D706.TEX-I::before {
padding: 0.694em 0.583em 0.012em 0;
content: "\3BB";
}
mjx-c.mjx-c1D458.TEX-I::before {
padding: 0.694em 0.521em 0.011em 0;
content: "k";
}
mjx-c.mjx-c222B.TEX-S2::before {
padding: 1.36em 0.944em 0.862em 0;
content: "\222B";
}
mjx-c.mjx-c30::before {
padding: 0.666em 0.5em 0.022em 0;
content: "0";
}
mjx-c.mjx-c1D451.TEX-I::before {
padding: 0.694em 0.52em 0.01em 0;
content: "d";
}
mjx-c.mjx-c1D438.TEX-I::before {
padding: 0.68em 0.764em 0 0;
content: "E";
}
mjx-c.mjx-c1D45A.TEX-I::before {
padding: 0.442em 0.878em 0.011em 0;
content: "m";
}
mjx-c.mjx-c1D450.TEX-I::before {
padding: 0.442em 0.433em 0.011em 0;
content: "c";
}
</style></head>
<body>
<div class="document" id="formula">
<h1 class="title">Formula</h1>
<p>These should be some in line math <span class="math"><mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" tabindex="0" ctxtmenu_counter="0" style="font-size: 123.4%; position: relative;"><mjx-math class="MJX-TEX" aria-hidden="true"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D44E TEX-I"></mjx-c></mjx-mi><mjx-mo class="mjx-n" space="3"><mjx-c class="mjx-c2212"></mjx-c></mjx-mo><mjx-mn class="mjx-n" space="3"><mjx-c class="mjx-c32"></mjx-c></mjx-mn><mjx-msup><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D44F TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: 0.363em;"><mjx-mn class="mjx-n" size="s"><mjx-c class="mjx-c32"></mjx-c></mjx-mn></mjx-script></mjx-msup></mjx-math><mjx-assistive-mml unselectable="on" display="inline"><math xmlns="http://www.w3.org/1998/Math/MathML"><mi>a</mi><mo>−</mo><mn>2</mn><msup><mi>b</mi><mn>2</mn></msup></math></mjx-assistive-mml></mjx-container></span> here.</p>
<div class="math">
<mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" display="true" tabindex="0" ctxtmenu_counter="1" style="font-size: 123.4%; position: relative;"><mjx-math display="true" class="MJX-TEX" aria-hidden="true" style="margin-left: 0px; margin-right: 0px;"><mjx-mfrac><mjx-frac type="d"><mjx-num><mjx-nstrut type="d"></mjx-nstrut><mjx-mrow><mjx-mn class="mjx-n"><mjx-c class="mjx-c32"></mjx-c></mjx-mn><mjx-mo class="mjx-n" space="3"><mjx-c class="mjx-cB1"></mjx-c></mjx-mo><mjx-msqrt space="3"><mjx-sqrt><mjx-surd><mjx-mo class="mjx-n"><mjx-c class="mjx-c221A"></mjx-c></mjx-mo></mjx-surd><mjx-box style="padding-top: 0.158em;"><mjx-mn class="mjx-n"><mjx-c class="mjx-c37"></mjx-c></mjx-mn></mjx-box></mjx-sqrt></mjx-msqrt></mjx-mrow></mjx-num><mjx-dbox><mjx-dtable><mjx-line type="d"></mjx-line><mjx-row><mjx-den><mjx-dstrut type="d"></mjx-dstrut><mjx-mn class="mjx-n"><mjx-c class="mjx-c33"></mjx-c></mjx-mn></mjx-den></mjx-row></mjx-dtable></mjx-dbox></mjx-frac></mjx-mfrac></mjx-math><mjx-assistive-mml unselectable="on" display="block"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><mfrac><mrow><mn>2</mn><mo>±</mo><msqrt><mn>7</mn></msqrt></mrow><mn>3</mn></mfrac></math></mjx-assistive-mml></mjx-container>
</div>
<p>Some text here, and then some more math:</p>
<div class="math">
<mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" display="true" tabindex="0" ctxtmenu_counter="2" style="font-size: 123.1%; position: relative;"><mjx-math display="true" class="MJX-TEX" aria-hidden="true" style="margin-left: 0px; margin-right: 0px;"><mjx-munderover><mjx-over style="padding-bottom: 0.192em; padding-left: 0.368em;"><mjx-mi class="mjx-n" size="s"><mjx-c class="mjx-c221E"></mjx-c></mjx-mi></mjx-over><mjx-box><mjx-munder><mjx-row><mjx-base><mjx-mo class="mjx-lop"><mjx-c class="mjx-c2211 TEX-S2"></mjx-c></mjx-mo></mjx-base></mjx-row><mjx-row><mjx-under style="padding-top: 0.167em; padding-left: 0.058em;"><mjx-texatom size="s" texclass="ORD"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D45B TEX-I"></mjx-c></mjx-mi><mjx-mo class="mjx-n"><mjx-c class="mjx-c3D"></mjx-c></mjx-mo><mjx-mn class="mjx-n"><mjx-c class="mjx-c31"></mjx-c></mjx-mn></mjx-texatom></mjx-under></mjx-row></mjx-munder></mjx-box></mjx-munderover><mjx-mfrac space="2"><mjx-frac type="d"><mjx-num><mjx-nstrut type="d"></mjx-nstrut><mjx-mrow><mjx-mo class="mjx-n"><mjx-c class="mjx-c2212"></mjx-c></mjx-mo><mjx-msup><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D452 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: 0.363em;"><mjx-texatom size="s" texclass="ORD"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D456 TEX-I"></mjx-c></mjx-mi><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D70B TEX-I"></mjx-c></mjx-mi></mjx-texatom></mjx-script></mjx-msup></mjx-mrow></mjx-num><mjx-dbox><mjx-dtable><mjx-line type="d"></mjx-line><mjx-row><mjx-den><mjx-dstrut type="d"></mjx-dstrut><mjx-msup><mjx-mn class="mjx-n"><mjx-c class="mjx-c32"></mjx-c></mjx-mn><mjx-script style="vertical-align: 0.289em;"><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D45B TEX-I"></mjx-c></mjx-mi></mjx-script></mjx-msup></mjx-den></mjx-row></mjx-dtable></mjx-dbox></mjx-frac></mjx-mfrac><mjx-mo class="mjx-n" space="3"><mjx-c class="mjx-c2B"></mjx-c></mjx-mo><mjx-mo class="mjx-n" space="3"><mjx-c class="mjx-c28"></mjx-c></mjx-mo><mjx-mfrac><mjx-frac type="d"><mjx-num><mjx-nstrut type="d"></mjx-nstrut><mjx-mrow><mjx-mn class="mjx-n"><mjx-c class="mjx-c35"></mjx-c></mjx-mn><mjx-mo class="mjx-n" space="3"><mjx-c class="mjx-c2212"></mjx-c></mjx-mo><mjx-mfrac space="3"><mjx-frac><mjx-num><mjx-nstrut></mjx-nstrut><mjx-mn class="mjx-n" size="s"><mjx-c class="mjx-c31"></mjx-c></mjx-mn></mjx-num><mjx-dbox><mjx-dtable><mjx-line></mjx-line><mjx-row><mjx-den><mjx-dstrut></mjx-dstrut><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D465 TEX-I"></mjx-c></mjx-mi></mjx-den></mjx-row></mjx-dtable></mjx-dbox></mjx-frac></mjx-mfrac></mjx-mrow></mjx-num><mjx-dbox><mjx-dtable><mjx-line type="d"></mjx-line><mjx-row><mjx-den><mjx-dstrut type="d"></mjx-dstrut><mjx-mn class="mjx-n"><mjx-c class="mjx-c34"></mjx-c></mjx-mn></mjx-den></mjx-row></mjx-dtable></mjx-dbox></mjx-frac></mjx-mfrac><mjx-mo class="mjx-n"><mjx-c class="mjx-c29"></mjx-c></mjx-mo></mjx-math><mjx-assistive-mml unselectable="on" display="block"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><munderover><mo data-mjx-texclass="OP">∑</mo><mrow data-mjx-texclass="ORD"><mi>n</mi><mo>=</mo><mn>1</mn></mrow><mi mathvariant="normal">∞</mi></munderover><mfrac><mrow><mo>−</mo><msup><mi>e</mi><mrow data-mjx-texclass="ORD"><mi>i</mi><mi>π</mi></mrow></msup></mrow><msup><mn>2</mn><mi>n</mi></msup></mfrac><mo>+</mo><mo stretchy="false">(</mo><mfrac><mrow><mn>5</mn><mo>−</mo><mfrac><mn>1</mn><mi>x</mi></mfrac></mrow><mn>4</mn></mfrac><mo stretchy="false">)</mo></math></mjx-assistive-mml></mjx-container>
</div>
<p>To get this to work right with rst2pdf, we need to put a blank line between each line of math.</p>
<div class="math">
<mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" display="true" tabindex="0" ctxtmenu_counter="3" style="font-size: 123.4%; position: relative;"><mjx-math display="true" class="MJX-TEX" aria-hidden="true" style="margin-left: 0px; margin-right: 0px;"><mjx-msub><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D6FC TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: -0.15em;"><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D461 TEX-I"></mjx-c></mjx-mi></mjx-script></mjx-msub><mjx-mo class="mjx-n"><mjx-c class="mjx-c28"></mjx-c></mjx-mo><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D456 TEX-I"></mjx-c></mjx-mi><mjx-mo class="mjx-n"><mjx-c class="mjx-c29"></mjx-c></mjx-mo><mjx-mo class="mjx-n" space="4"><mjx-c class="mjx-c3D"></mjx-c></mjx-mo><mjx-mi class="mjx-i" space="4"><mjx-c class="mjx-c1D443 TEX-I"></mjx-c></mjx-mi><mjx-mo class="mjx-n"><mjx-c class="mjx-c28"></mjx-c></mjx-mo><mjx-msub><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D442 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: -0.15em;"><mjx-mn class="mjx-n" size="s"><mjx-c class="mjx-c31"></mjx-c></mjx-mn></mjx-script></mjx-msub><mjx-mo class="mjx-n"><mjx-c class="mjx-c2C"></mjx-c></mjx-mo><mjx-msub space="2"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D442 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: -0.15em;"><mjx-mn class="mjx-n" size="s"><mjx-c class="mjx-c32"></mjx-c></mjx-mn></mjx-script></mjx-msub><mjx-mo class="mjx-n"><mjx-c class="mjx-c2C"></mjx-c></mjx-mo><mjx-mo class="mjx-n" space="2"><mjx-c class="mjx-c2026"></mjx-c></mjx-mo><mjx-msub space="2"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D442 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: -0.15em;"><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D461 TEX-I"></mjx-c></mjx-mi></mjx-script></mjx-msub><mjx-mo class="mjx-n"><mjx-c class="mjx-c2C"></mjx-c></mjx-mo><mjx-msub space="2"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D45E TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: -0.15em; margin-left: -0.014em;"><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D461 TEX-I"></mjx-c></mjx-mi></mjx-script></mjx-msub><mjx-mo class="mjx-n"><mjx-c class="mjx-c29"></mjx-c></mjx-mo><mjx-mo class="mjx-n" space="4"><mjx-c class="mjx-c3D"></mjx-c></mjx-mo><mjx-msub space="4"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D446 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: -0.15em; margin-left: -0.032em;"><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D456 TEX-I"></mjx-c></mjx-mi></mjx-script></mjx-msub><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D706 TEX-I"></mjx-c></mjx-mi></mjx-math><mjx-assistive-mml unselectable="on" display="block"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><msub><mi>α</mi><mi>t</mi></msub><mo stretchy="false">(</mo><mi>i</mi><mo stretchy="false">)</mo><mo>=</mo><mi>P</mi><mo stretchy="false">(</mo><msub><mi>O</mi><mn>1</mn></msub><mo>,</mo><msub><mi>O</mi><mn>2</mn></msub><mo>,</mo><mo>…</mo><msub><mi>O</mi><mi>t</mi></msub><mo>,</mo><msub><mi>q</mi><mi>t</mi></msub><mo stretchy="false">)</mo><mo>=</mo><msub><mi>S</mi><mi>i</mi></msub><mi>λ</mi></math></mjx-assistive-mml></mjx-container>
</div>
<div class="math">
<mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" display="true" tabindex="0" ctxtmenu_counter="4" style="font-size: 123.4%; position: relative;"><mjx-math display="true" class="MJX-TEX" aria-hidden="true" style="margin-left: 0px; margin-right: 0px;"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D458 TEX-I"></mjx-c></mjx-mi><mjx-mo class="mjx-n" space="4"><mjx-c class="mjx-c3D"></mjx-c></mjx-mo><mjx-msubsup space="4"><mjx-mo class="mjx-lop"><mjx-c class="mjx-c222B TEX-S2"></mjx-c></mjx-mo><mjx-script style="vertical-align: -0.896em; margin-left: -0.388em;"><mjx-texatom size="s" texclass="ORD" style="margin-left: 0.647em;"><mjx-mn class="mjx-n"><mjx-c class="mjx-c31"></mjx-c><mjx-c class="mjx-c30"></mjx-c></mjx-mn></mjx-texatom><mjx-spacer style="margin-top: 1.498em;"></mjx-spacer><mjx-texatom size="s" texclass="ORD"><mjx-mn class="mjx-n"><mjx-c class="mjx-c30"></mjx-c></mjx-mn></mjx-texatom></mjx-script></mjx-msubsup><mjx-mi class="mjx-i" space="2"><mjx-c class="mjx-c1D465 TEX-I"></mjx-c></mjx-mi><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D451 TEX-I"></mjx-c></mjx-mi><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D465 TEX-I"></mjx-c></mjx-mi></mjx-math><mjx-assistive-mml unselectable="on" display="block"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><mi>k</mi><mo>=</mo><msubsup><mo data-mjx-texclass="OP">∫</mo><mrow data-mjx-texclass="ORD"><mn>0</mn></mrow><mrow data-mjx-texclass="ORD"><mn>10</mn></mrow></msubsup><mi>x</mi><mi>d</mi><mi>x</mi></math></mjx-assistive-mml></mjx-container>
</div>
<div class="math">
<mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" display="true" tabindex="0" ctxtmenu_counter="5" style="font-size: 123.4%; position: relative;"><mjx-math display="true" class="MJX-TEX" aria-hidden="true" style="margin-left: 0px; margin-right: 0px;"><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D438 TEX-I"></mjx-c></mjx-mi><mjx-mo class="mjx-n" space="4"><mjx-c class="mjx-c3D"></mjx-c></mjx-mo><mjx-mi class="mjx-i" space="4"><mjx-c class="mjx-c1D45A TEX-I"></mjx-c></mjx-mi><mjx-msup><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D450 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: 0.413em;"><mjx-mn class="mjx-n" size="s"><mjx-c class="mjx-c32"></mjx-c></mjx-mn></mjx-script></mjx-msup></mjx-math><mjx-assistive-mml unselectable="on" display="block"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><mi>E</mi><mo>=</mo><mi>m</mi><msup><mi>c</mi><mn>2</mn></msup></math></mjx-assistive-mml></mjx-container>
</div>
<p>More inline math: <span class="math"><mjx-container class="MathJax CtxtMenu_Attached_0" jax="CHTML" tabindex="0" ctxtmenu_counter="6" style="font-size: 123.4%; position: relative;"><mjx-math class="MJX-TEX" aria-hidden="true"><mjx-msup><mjx-mi class="mjx-i"><mjx-c class="mjx-c1D452 TEX-I"></mjx-c></mjx-mi><mjx-script style="vertical-align: 0.363em;"><mjx-mi class="mjx-i" size="s"><mjx-c class="mjx-c1D465 TEX-I"></mjx-c></mjx-mi></mjx-script></mjx-msup></mjx-math><mjx-assistive-mml unselectable="on" display="inline"><math xmlns="http://www.w3.org/1998/Math/MathML"><msup><mi>e</mi><mi>x</mi></msup></math></mjx-assistive-mml></mjx-container></span>.</p>
</div>
</body>
</html>
@language python
@nosearch
# changed
@language html
<a href="http://127.0.0.1:8000/leo_toc.html">Link</a>
<!DOCTYPE html>
<html>
<head>
<style>
ul.leo-tree-example {
background-color: #ffffec;
zoom: 150%; /* blurs icons a bit. */
}
ul.leo-tree-example li {
background-repeat: no-repeat;
background-position: 0px 5px;
padding-left: 27px;
}
li {
background-image:
url('https://raw.github.com/vivainio/leo/master/leo/Icons/box00.GIF');
background-repeat: no-repeat;
background-position: 0px 5px;
padding-left: 27px;
}
li.selected {
background-color: lightgrey;
}
li.leaf {
list-style-type: none;
}
li.plus {
list-style-image: url('http://leoeditor.com/plusnode.gif')
}
li.minus {
list-style-image: url('http://leoeditor.com/minusnode.gif')
}
li.leaf {
background-image: url('http://leoeditor.com/box00.GIF')
}
li.body {
background-image: url('http://leoeditor.com/box01.GIF')
}
li.mark {
background-image: url('http://leoeditor.com/box02.GIF')
}
li.mark-body {
background-image: url('http://leoeditor.com/box03.GIF')
}
li.clone {
background-image: url('http://leoeditor.com/box04.GIF')
}
li.clone-body {
background-image: url('http://leoeditor.com/box05.GIF')
}
li.clone-mark {
background-image: url('http://leoeditor.com/box06.GIF')
}
li.clone-mark-body {
background-image: url('http://leoeditor.com/box07.GIF')
}
li.dirty {
background-image: url('http://leoeditor.com/box08.GIF')
}
li.dirty-body {
background-image: url('http://leoeditor.com/box09.GIF')
}
li.dirty-mark {
background-image: url('http://leoeditor.com/box10.GIF')
}
li.dirty-mark-body {
background-image: url('http://leoeditor.com/box11.GIF')
}
li.dirty-clone {
background-image: url('http://leoeditor.com/box12.GIF')
}
li.dirty-clone-body {
background-image: url('http://leoeditor.com/box13.GIF')
}
li.dirty-clone-mark {
background-image: url('http://leoeditor.com/box14.GIF')
}
</style>
</head>
<body>
<ul class="leo-tree-example">
<li class='plus clone-mark'>
test
</li>
<ul>
<li class='plus clone-body'>
child
</li>
<ul>
<li class='leaf body'>
grandchild
</li>
</ul>
<li class='plus clone-body'>
child
</li>
<ul>
<li class='leaf body'>
grandchild
</li>
</ul>
</ul>
</ul>
</body>
</html>
@language html
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<!--@+leo-ver=5-thin-->
<!--@+node:ekr.20100808060203.4273: * @thin html/front.html-->
<!--@@first-->
<!--@@first-->
<!--@@language html-->
<!--@@tabwidth -2-->
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<!--@+<< head >>-->
<!--@+node:ekr.20100808060203.4274: ** << head >>-->
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Leo's Home Page</title>
<link rel="stylesheet" href="_static/default.css" type="text/css" />
<link rel="stylesheet" href="_static/silver_city.css" type="text/css" />
<!--
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '',
VERSION: '4.7-final',
COLLAPSE_MODINDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: false
};
</script>
-->
<!-- <script type="text/javascript" src="_static/jquery.js"></script>-->
<!-- <script type="text/javascript" src="_static/doctools.js"></script>-->
<link rel="top" title="Leo v4.7-final documentation" href="index.html" />
<link rel="Users Guide" title="Leo’s Users Guide" href="leo_toc.html" />
<!-- <link rel="prev" title="Chapter 5: Using Leo’s Commands" href="commands.html" />-->
<!--@-<< head >>-->
</head>
<body>
<!--@+<< div top nav >>-->
<!--@+node:ekr.20100808060203.4276: ** << div top nav >>-->
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="genindex.html" title="General Index"
accesskey="I">index</a></li>
<li class="right" >
<a href="leo_toc.html" title="Leo’s Users Guide"
accesskey="N">contents</a> |</li>
<!--
<li class="right" >
<a href="commands.html" title="Chapter 5: Using Leo’s Commands"
accesskey="P">previous</a> |</li>
-->
<li><a href="leo_toc.html">Leo v4.7-final documentation</a> »</li>
</ul>
</div>
<!--@-<< div top nav >>-->
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">
<!--@+<< div outer section >>-->
<!--@+node:ekr.20100808060203.4286: ** << div outer section >>-->
<div class="section" id="Leo’s Home page">
<h1>Leo’s Home Page</h1>
Leo is...
<!--@+<< what is Leo list >>-->
<!--@+node:ekr.20100808060203.4288: *3* << what is Leo list >>-->
<li>
An outline-oriented <i>browser</i> and <i>project manager</i> that organizes<br>
programs, web sites, URL's, pictures, movies or any other kind of data.<br>
You can organize your data in as many ways as you like within a <i>single</i> outline.</li>
<li>
A <i>programmer's editor</i> that fully integrates outline structure with programming.
<!--
<a HREF="http://www.eecs.harvard.edu/~nr/noweb/">noweb</a> and
<a HREF="http://www-cs-faculty.stanford.edu/~knuth/cweb.html">CWEB</a> markup.</li>
-->
<li>
<i>Fully scriptable</i> using <a HREF="http://www.python.org/">Python</a>.
Leo's core is 100% pure Python.</li>
<li>
<i>Portable</i>. Leo runs on Windows, Linux and MacOS X.<br>
Leo requires either the
<a HREF="http://docs.python.org/library/tkinter.html">Tkinter</a> or
<a HREF="http://www.riverbankcomputing.co.uk/software/pyqt/intro">PyQt</a> widget set.<br>
Leo's outline files are <a HREF="http://www.w3.org/XML/">XML</a> format.</li>
<li>
<a href="http://www.opensource.org/"><i>Open Software</i></a>,
distributed under the
<a href="http://www.opensource.org/licenses/mit-license.php/">MIT License</a>.</li>
<!--@-<< what is Leo list >>-->
<!-- div body toc -->
<!-- < < inner sections > > -->
<p>Leo has an active community of helpful users and developers.<br>
Please use the <a href="http://groups.google.com/group/leo-editor">leo-editor</a>
group to ask questions and make suggestions.</p>
<p>Leo's author is <a HREF="ekr.html">Edward K. Ream</a></p>
</div>
<!--@-<< div outer section >>-->
</div>
</div>
</div>
<!--@+<< sidebar >>-->
<!--@+node:ekr.20100808060203.4282: ** << sidebar >>-->
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
<!--@+<< sidebar contents >>-->
<!--@+node:ekr.20100808060203.4283: *3* << sidebar contents >>-->
<p class="logo"><a href="leo_toc.html">
<img class="logo" src="_static/Leo4-80-border.jpg" alt="Logo"/></a></p>
<a class="reference external" href="intro.html">
Read this tutorial first</a><br>
<a class="reference external" href="http://www.3dtree.com/ev/e/sbooks/leo/sbframetoc_ie.htm">
Another tutorial</a><br>
<a class="reference external" href="http:screen-shots.html">
Screen shots of Leo</a><br>
<a class="reference external" href="testimonials.html">
Quotes from Leo’s Users</a><br>
<a class="reference external"
href="http://sourceforge.net/project/showfiles.php?group_id=3458&package_id=29106">
Download Leo</a><br>
<a class="reference external" href="http://groups.google.com/group/leo-editor">
leo-editor: Google Groups</a><br>
<a class="reference external" href="http://launchpad.net/leo-editor">
leo-editor: Launchpad</a><br>
<a class="reference external" href="http://leo.zwiki.org">
Leo’s Wiki</a><br>
<a class="reference external" href="http://www.mind-mapping.org/">
Mind Mapping</a><br>
<a class="reference external" href="leoLinks.html">
More links...</a><br>
<!-- <a class="reference external" href="install.html">Installing Leo</a><br>-->
<!-- <a class="reference external" href="intro.html">Beginners Guide</a><br>-->
<!-- <a class="reference external" href="leo_toc.html">Users Guide</a><br>-->
<!-- <a class="reference external" href="FAQ.html">FAQ</a><br>-->
<!--Leo’s <a class="reference external" href="http://www.greygreen.org/leo/">Daily snapshots</a><br>-->
<!--
<h5>Edward K. Ream</h5>
<a class="reference external" href="ekr.html">Home</a><br>
<a class="reference external" href="mailto:edreamleo@gmail.com">Contact</a><br>
-->
<!--@-<< sidebar contents >>-->
<!-- < < sidebar links > > -->
<!-- < < searchbox > > -->
</div>
</div>
<!--@-<< sidebar >>-->
<div class="clearer"></div>
</div>
<!--@+<< div bottom nav >>-->
<!--@+node:ekr.20100808060203.4277: ** << div bottom nav >>-->
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="genindex.html" title="General Index"
>index</a></li>
<li class="right" >
<a href="leo_toc.html" title="Leo’s Users Guide"
>contents</a> |</li>
<!--
<li class="right" >
<a href="commands.html" title="Chapter 5: Using Leo’s Commands"
>previous</a> |</li>
-->
<li><a href="leo_toc.html">Leo v4.7-final documentation</a> »</li>
</ul>
</div>
<!--@-<< div bottom nav >>-->
<!--@+<< div footer >>-->
<!--@+node:ekr.20100808060203.4278: ** << div footer >>-->
<div class="footer">
© Copyright 2010, Edward K. Ream.
Last updated on Aug 08, 2010.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 0.6.3.
</div>
<!--@-<< div footer >>-->
</body>
</html>
<!--@-leo-->
from leo.core.leoQt import QtGui
import leo.plugins.demo as demo_module
import importlib
importlib.reload(demo_module)
C:\Repos\leo-editor\leo\Icons\LeoLogo.svg
@language rest
@wrap
Leosplash.GIF
#
# a bar plot with errorbars
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind + width, womenMeans, width, color='y', yerr=womenStd)
# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend((rects1[0], rects2[0]), ('Men', 'Women'))
def autolabel(rects):
    # attach some text labels
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
            '%d' % int(height),
            ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
# http://matplotlib.org/1.5.1/examples/animation/basic_example.html
def update_line(num, data, line):
    line.set_data(data[..., :num])
    return line,  # a tuple.
fig1 = plt.figure()
data = np.random.rand(2, 100)  # was (2, 25)
# g.printObj(data)
l, = plt.plot([], [], 'r-')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('x')
plt.title('test')
line_ani = animation.FuncAnimation(fig1, update_line, 25,
    fargs=(data, l), interval=50, blit=True)
# Retain a reference to the animation so it isn't garbage collected.
g.app.permanentScriptDict['pyplot_animations'] = line_ani
# http://matplotlib.org/1.5.1/examples/animation/basic_example.html
fig2 = plt.figure()
x = np.arange(-9, 10)
y = np.arange(-9, 10).reshape(-1, 1)
base = np.hypot(x, y)
images = []
for add in np.arange(15):
    images.append((plt.pcolor(x, y, base + add, norm=plt.Normalize(0, 30)),))
anim = animation.ArtistAnimation(fig2, images,
    interval=50,
    repeat_delay=30,
    blit=True)
# Retain a reference to the animation so it isn't garbage collected.
# (Using `anim` avoids shadowing the `animation` module.)
g.app.permanentScriptDict['pyplot_animations'] = anim
# http://matplotlib.org/1.5.1/examples/animation/animate_decay.html
# import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib.animation as animation
@others
# global data.
fig, ax = plt.subplots()
line, = ax.plot([], [], lw=2)
ax.grid()
xdata, ydata = [], []
anim = animation.FuncAnimation(
    fig, run, data_gen, blit=False, interval=10,
    repeat=False, init_func=init,
    cache_frame_data=False,
)
# Retain a reference to the animation so it isn't garbage collected.
# (Using `anim` avoids shadowing the `animation` module.)
g.app.permanentScriptDict['pyplot_animations'] = anim
def data_gen(t=0):
    for _ in range(500):
        t += 0.1
        yield t, np.sin(2*np.pi*t) * np.exp(-t/10.0)
class MyDemo(demo_module.Demo):
    @others
def init():
    ax.set_ylim(-1.1, 1.1)
    ax.set_xlim(0, 10)
    del xdata[:]
    del ydata[:]
    line.set_data(xdata, ydata)
    return line,
def run(data):
    # update the data
    t, y = data
    xdata.append(t)
    ydata.append(y)
    xmin, xmax = ax.get_xlim()
    if t >= xmax:
        ax.set_xlim(xmin, 2*xmax)
        ax.figure.canvas.draw()
    line.set_data(xdata, ydata)
    return line,
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
# Graph 1.
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'ko-')
plt.title('A tale of 2 subplots')
plt.ylabel('Damped oscillation')
# Graph 2.
plt.subplot(2, 1, 2)
plt.plot(x2, y2, 'r.-')
plt.xlabel('time (s)')
plt.ylabel('Undamped')
@nocolor-node
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg width="20cm" height="15cm" viewBox="0 0 800 600"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
baseProfile="tiny" version="1.2">
<title>Spheres</title>
<desc>Semi-transparent bubbles on a colored background.</desc>
<defs>
<!-- Create radial gradients for each bubble. -->
<radialGradient id="blueBubble" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" stop-opacity="1" />
<stop offset="25%" stop-color="#cdcdff" stop-opacity=".65" />
<stop offset="100%" stop-color="#cdaacd" stop-opacity=".75" />
</radialGradient>
<radialGradient id="redBubble" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" stop-opacity="1" />
<stop offset="25%" stop-color="#ffcdcd" stop-opacity=".65" />
<stop offset="100%" stop-color="#bbbb99" stop-opacity=".75" />
</radialGradient>
<radialGradient id="greenBubble" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" stop-opacity="1" />
<stop offset="25%" stop-color="#cdffcd" stop-opacity=".65" />
<stop offset="100%" stop-color="#99aaaa" stop-opacity=".75" />
</radialGradient>
<radialGradient id="yellowBubble" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" stop-opacity="1" />
<stop offset="25%" stop-color="#ffffcd" stop-opacity=".65" />
<stop offset="100%" stop-color="#bbbbaa" stop-opacity=".75" />
</radialGradient>
<radialGradient id="background" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="400" fx="250" fy="250">
<stop offset="0%" stop-color="#ffffee" />
<stop offset="100%" stop-color="#ccccaa" />
</radialGradient>
<linearGradient id="surface" gradientUnits="userSpaceOnUse"
x1="-100" y1="200" x2="400" y2="200">
<stop offset="0%" stop-color="#ffffcc" />
<stop offset="100%" stop-color="#bbbb88" />
</linearGradient>
<!-- Create radial gradients for each circle to make them look like
spheres. -->
<radialGradient id="blueSphere" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" />
<stop offset="75%" stop-color="blue" />
<stop offset="100%" stop-color="#222244" />
</radialGradient>
<radialGradient id="redSphere" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" />
<stop offset="75%" stop-color="red" />
<stop offset="100%" stop-color="#442222" />
</radialGradient>
<radialGradient id="greenSphere" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" />
<stop offset="75%" stop-color="green" />
<stop offset="100%" stop-color="#113311" />
</radialGradient>
<radialGradient id="yellowSphere" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="-50">
<stop offset="0%" stop-color="white" />
<stop offset="75%" stop-color="yellow" />
<stop offset="100%" stop-color="#444422" />
</radialGradient>
<radialGradient id="shadowGrad" gradientUnits="userSpaceOnUse"
cx="0" cy="0" r="100" fx="-50" fy="50">
<stop offset="0%" stop-color="black" stop-opacity="1.0" />
<stop offset="100%" stop-color="black" stop-opacity="0.0" />
</radialGradient>
<!-- Define a shadow for each sphere. -->
<circle id="shadow" fill="url(#shadowGrad)" cx="0" cy="0" r="100" />
<g id="bubble">
<circle fill="black" cx="0" cy="0" r="50" />
<circle fill="#a6ce39" cx="0" cy="0" r="33" />
<path fill="black" d="M 37,50 L 50,37 L 12,-1 L 22,-11 L 10,-24 L -24,10
L -11,22 L -1,12 Z" />
<circle cx="0" cy="0" r="100" />
</g>
</defs>
<g>
<rect fill="url(#background)" x="0" y="0" width="800" height="600" />
</g>
<g transform="translate(200,700)">
<use xlink:href="#bubble" fill="url(#blueBubble)" />
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="1s" dur="10s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(315,700)">
<g transform="scale(0.5,0.5)">
<use xlink:href="#bubble" fill="url(#redBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="3s" dur="7s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(80,700)">
<g transform="scale(0.65,0.65)">
<use xlink:href="#bubble" fill="url(#greenBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="5s" dur="9s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(255,700)">
<g transform="scale(0.3,0.3)">
<use xlink:href="#bubble" fill="url(#yellowBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="2s" dur="6s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(565,700)">
<g transform="scale(0.4,0.4)">
<use xlink:href="#bubble" fill="url(#blueBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="4s" dur="8s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(715,700)">
<g transform="scale(0.6,0.6)">
<use xlink:href="#bubble" fill="url(#redBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="1s" dur="4s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(645,700)">
<g transform="scale(0.375,0.375)">
<use xlink:href="#bubble" fill="url(#greenBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="0s" dur="11s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(555,700)">
<g transform="scale(0.9,0.9)">
<use xlink:href="#bubble" fill="url(#yellowBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="3s" dur="7.5s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(360,700)">
<g transform="scale(0.5,0.5)">
<use xlink:href="#bubble" fill="url(#blueBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="3s" dur="6s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(215,700)">
<g transform="scale(0.45,0.45)">
<use xlink:href="#bubble" fill="url(#redBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="5.5s" dur="7s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(420,700)">
<g transform="scale(0.75,0.75)">
<use xlink:href="#bubble" fill="url(#greenBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="1s" dur="9s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(815,700)">
<g transform="scale(0.6,0.6)">
<use xlink:href="#bubble" fill="url(#yellowBubble)" />
</g>
<animateTransform attributeName="transform" type="translate" additive="sum"
values="0,0; 0,-800" begin="2s" dur="9.5s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(225,375)" >
<g transform="scale(1.0,0.5)" >
<path d="M 0 0 L 350 0 L 450 450 L -100 450 z"
fill="url(#surface)" stroke="none" />
</g>
</g>
<g transform="translate(200,0)" >
<g transform="translate(200,490) scale(2.0,1.0) rotate(45)" >
<rect fill="#a6ce39" x="-69" y="-69" width="138" height="138" />
<circle fill="black" cx="0" cy="0" r="50" />
<circle fill="#a6ce39" cx="0" cy="0" r="33" />
<path fill="black" d="M 37,50 L 50,37 L 12,-1 L 22,-11 L 10,-24 L -24,10
L -11,22 L -1,12 Z" />
<animateTransform attributeName="transform" type="rotate" additive="sum" values="0; 360"
begin="0s" dur="10s" fill="freeze" repeatCount="indefinite" />
</g>
<g transform="translate(200,375)">
<use xlink:href="#shadow" transform="translate(25,55) scale(1.0,0.5)" />
<circle fill="url(#blueSphere)" cx="0" cy="0" r="100" />
</g>
<g transform="translate(315,440)">
<g transform="scale(0.5,0.5)">
<use xlink:href="#shadow" transform="translate(25,55) scale(1.0,0.5)" />
<circle fill="url(#redSphere)" cx="0" cy="0" r="100" />
</g>
</g>
<g transform="translate(80,475)">
<g transform="scale(0.65,0.65)">
<use xlink:href="#shadow" transform="translate(25,55) scale(1.0,0.5)" />
<circle fill="url(#greenSphere)" cx="0" cy="0" r="100" />
</g>
</g>
<g transform="translate(255,525)">
<g transform="scale(0.3,0.3)">
<use xlink:href="#shadow" transform="translate(25,55) scale(1.0,0.5)" />
<circle fill="url(#yellowSphere)" cx="0" cy="0" r="100" />
</g>
</g>
</g>
</svg>
C:\Repos\leo-editor\leo\Icons\bubbles.svg
@language rest
@wrap
''' Creates a window for *live* rendering of rst, html, etc. (Qt only).
Commands
========
viewrendered.py creates the following (``Alt-X``) commands:
``viewrendered``
    opens a new window where the current body text is rendered as HTML
    (if it starts with '<'), or otherwise reStructuredText.
``viewrendered-big``
    as above, but zoomed in, useful for presentations
``viewrendered-html``
    displays the html source generated from reStructuredText, useful for
    debugging
``viewrendered`` sets the process current directory (os.chdir()) to the path
to the node being rendered, to allow relative paths to work in
``.. image::`` directives.
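
For example (path and file name are illustrative), if the rendered node
lives in an outline at ``C:\Repos\leo-editor\leo``, then::

    .. image:: Icons/LeoSplash.GIF

loads the image relative to that directory.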
reStructuredText errors and warnings may be shown. For example, both::

    Heading
    -------
    `This` is **really** a line of text.

and::

    <h1>Heading</h1>
    <tt>This</tt> is <b>really</b> a line of text.

will look something like:
**Heading**
`This` is **really** a line of text.
Settings
========
@string view-rendered-default-kind = rst
----------------------------------------
The default kind of rendering. One of (big,rst,html)
@bool view-rendered-auto-create = False
---------------------------------------
When True, the plugin will create a rendering pane automatically.
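
For example, a minimal sketch of overriding both settings in an
``@settings`` tree (the values shown are illustrative)::

    @settings
        @string view-rendered-default-kind = big
        @bool view-rendered-auto-create = True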
'''
@language md
# Title
This is **bold** and *italics*.
More info at [Leo's home page](http://leoeditor.com/).
### Section
- Bullet list
- Another item
- Third item.
Inline:
\int_{b^a} \, x^2 \, dx: :math:`\int_{b^a} \, x^2 \, dx`
d/dx \int_{0^x}\,f(u)\,du=f(x): :math:`d/dx \int_{0^x}\,f(u)\,du=f(x)`
\int_{0^x}: :math:`\int_{0^x}`
block:
.. math::
x + 5 = -3
\int_{b^a} \, x^2 \, dx
d / dx \, \left(\int_{0^x} f(u)\,du\right)=f(x)
d / dx (\int_{0^x} f(u)\,du ) = f(x)
y_k(\mathbf{x},\mathbf{w}) = p(C_1|\mathbf{x})
Alignment with \\\\ in \\begin{equation}:
.. math::
\begin{equation}
x + 5 = -3 \\
\int_{b^a} \, x^2 \, dx\\
d / dx \left(\int_{0^x} f(u)\,du\right)=f(x)\\
d / dx (\int_{0^x} f(u)\,du) = f(x)\\
y_k(\mathbf{x},\mathbf{w}) = p(C_1|\mathbf{x})
\end{equation}
Inline: :math:`y_k(\mathbf{x},\mathbf{w}) = p(C_1|\mathbf{x})`
Inline: :math:`(\sum_{t=0}^{N}\,f(t,k))/N`
The final output will be a vector :math:`Y` with :math:`K` elements, :math:`y_k`, where
:math:`y_k(\mathbf{x},\mathbf{w}) = p(C_1|\mathbf{x})` is the probability that node :math:`k` is in class :math:`C_1` and
:math:`p(C_2|\mathbf{x}) = 1-p(C_1|\mathbf{x})`
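A numeric illustration (values invented for the example): if
:math:`p(C_1|\mathbf{x}) = 0.7`, then
:math:`p(C_2|\mathbf{x}) = 1 - 0.7 = 0.3`; the two class probabilities
always sum to one.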
def setup(self, p=None):
    c = self.c
    self.end_on_exception = True  # Good for debugging.
    self.delta = 10
    demo.set_text_delta(10)
    # self.geometry1 = self.get_top_geometry()
    # g.trace(self.get_top_geometry())
    p = g.findNodeAnywhere(c, 'Demo Area')
    self.root = p.copy() if p else None
    if p:
        p.expand()
        c.selectPosition(p)
    # c.frame.equalSizedPanes()
    c.redraw()
    self.set_youtube_position()
@language md
- Item
[link](xxx)
@language rest
#####
Title
#####
This is **bold** and *italics*.
More info at `Leo's home page <http://leoeditor.com/>`_.
Section
=======
- Bullet list
- Another item
- Third item.
@language rest
@wrap
- test2
*italic*
**bold**
@language python
@language python
def foo():
pass
@language rest
@wrap
code::
print('Hello world')
for i in 0,1:
print(i)
@language md
@wrap
[Leo's home page](https://leo-editor.github.io/leo-editor/)
@language rest
@wrap
`Leo's home page <https://leo-editor.github.io/leo-editor/>`_
@language rest
@wrap
`EKR mail <http://mail.google.com/mail/#inbox>`_
def setup_script(self):
self.delete_widgets()
@language md
@wrap
[weather.com](http://www.weather.com/weather/today/Madison+WI+53705?from=hp_promolocator&lswe=53705&lwsa=Weather36HourHealthCommand)
#
# a bar plot with errorbars
# import matplotlib # Make *sure* this is imported.
import matplotlib.pyplot as plt
import numpy as np
# import matplotlib.animation as animation
#
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
c._matplot.connect_ax_to_widget(plt)
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind + width, womenMeans, width, color='y', yerr=womenStd)
# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend((rects1[0], rects2[0]), ('Men', 'Women'))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.draw()
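Note: the c._matplot.connect_ax_to_widget(plt) call above is specific to Leo's
matplot plugin. To run this demo outside Leo, drop that line and replace
plt.draw() with plt.show() so the chart appears in a normal matplotlib window.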
@language md
https://github.com/leo-editor/leo-editor/issues/1231


@language latex
% VR: latex rendering not ready for Python 3.
% VR3: doesn't work
\begin{align}
\nabla \times \vec{\mathbf{B}} -\, \frac1c\, \frac{\partial\vec{\mathbf{E}}}{\partial t} & = \frac{4\pi}{c}\vec{\mathbf{j}} \\
\nabla \cdot \vec{\mathbf{E}} & = 4 \pi \rho \\
\nabla \times \vec{\mathbf{E}}\, +\, \frac1c\, \frac{\partial\vec{\mathbf{B}}}{\partial t} & = \vec{\mathbf{0}} \\
\nabla \cdot \vec{\mathbf{B}} & = 0
\end{align}
Quaternions:
1 * x == x (identity)
x * 1 == x (identity)
-1 * x == -x (math)
x * -1 == -x (math)
i**2 = j**2 = k**2 == -1
From the diagram
red:
i*i = -1
j*i = k
k*i = -j
-j*i = -k
-i*i = 1
-k*i = j
blue:
i*j = -k
j*j = -1
k*j = i
-k*j = -i
-i*j = k
-j*j = 1
i*j = -k = -j*i
k*i = -j = -(i*k)
In short
x*y = -y*x
1 i j k -1 -i -j -k
==========================================================
1 1 i j k -1 -i -j -k
i i*1 -1 -k i*(j*i) i*-1 i*-i i*-j -i*(j*i)
i -1 -1 1
j j
k k
-1 -1
-i -i
-j -j
-k -k
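A minimal numpy sketch of the identities above, using one standard 2x2 complex
matrix representation of the quaternion units. Whether i*j comes out as k or
-k depends on the chosen orientation, so the sketch checks only the
orientation-independent rules:

import numpy as np
one = np.array([[1, 0], [0, 1]], dtype=complex)
i = np.array([[1j, 0], [0, -1j]])
j = np.array([[0, 1], [-1, 0]], dtype=complex)
k = np.array([[0, 1j], [1j, 0]])
for m in (i, j, k):
    assert np.allclose(m @ m, -one)      # i**2 = j**2 = k**2 = -1
for x, y in ((i, j), (j, k), (k, i)):
    assert np.allclose(x @ y, -(y @ x))  # x*y = -y*x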
'''Create multiplication table from identities & assoc law.'''
g.cls()
names = "e a a2 b b2 c c2 d d2 x y z".split()
knowns = [
# e any any, any e any
'a a a2',
'a2 a e',
'b a c2',
'b2 a y',
'c a d2',
'c2 a x',
'd a b2',
'd2 a z',
'x a b',
'y a d',
'z a c',
#
'a x c',
'a2 x b2',
'b x d',
'b2 x a2',
'c x a',
'c2 x d2',
'd x b',
'd2 x c2',
'x x e',
'y x z',
'z x y',
]
@others
x = Table(names, knowns)
class Table:
def __init__(self, names, knowns):
self.n = n = len(names)
self.max_n = max([len(z) for z in names])
assert names[0] in '1e'
self.names = names
self.cols = {}
# Keys are names, values are dicts.
self.rows = {}
# Keys are names, values are dicts.
self.known_cols = []
self.init(knowns)
self.solve()
self.print_rows()
@others
def init(self, knowns):
'''Init the row dicts.'''
for row in names:
self.rows[row] = d = {}
for col in names:
if col in '1e': d[col] = row
elif row in '1e': d[col] = col
else: d[col] = '?'
# Add the known values.
known_cols, known_rows = set(), set()
for z in knowns:
row, col, val = z.split()
assert row in names, row
assert col in names, col
assert val in names, val
d = self.rows.get(row)
d[col] = val
known_cols.add(col)
known_rows.add(row)
# Invert the row dict to create the column dict.
for col in names:
self.cols[col] = d = {}
for row in names:
d2 = self.rows[row]
d [row] = d2 [col]
# g.trace('known_rows',sorted(known_rows))
self.known_cols = list(sorted(known_cols))
    g.trace('known_cols', self.known_cols)
self.print_rows()
def teardown(self):
c = self.c
self.delete_all_widgets()
# self.set_top_geometry(self.geometry1)
# g.trace(self.get_top_geometry())
if hasattr(self, 'delta') and self.delta > 0:
self.set_text_delta(-self.delta)
if self.root and c.positionExists(self.root, trace=False):
self.root.deleteAllChildren()
p = c.lastTopLevel()
p.expand()
c.selectPosition(p)
c.redraw()
def print_col(self, name):
d = self.cols.get(name)
aList = ['%2s' % d.get(z) for z in self.names]
print(' %2s: %s' % (name, ' '.join(aList)))
def print_col_header(self):
aList = ['%2s' % z for z in self.names]
print('cols:%s' % ' '.join(aList))
n = 3 * len(self.names)
print(' %s' % ('-' * (n-1)))
def print_row(self, name):
d = self.rows.get(name)
aList = ['%2s' % d.get(z) for z in self.names]
print('%2s: %s' % (name, ' '.join(aList)))
def print_rows(self):
aList = ['%2s' % z for z in self.names]
print(' %s' % ' '.join(aList))
print(' %s' % ('-' * (3*len(self.names)-1)))
for row in self.names:
self.print_row(row)
def solve(self):
'''Solve all unknown columns.'''
col1 = self.known_cols[0]
col2 = self.known_cols[-1]
if 0:
self.print_col_header()
self.print_col(col1)
self.print_col(col2)
d1 = self.cols.get(col1)
d2 = self.cols.get(col2)
unsolved = [z for z in self.names if z not in ('1', 'e', col1, col2)]
# For every unsolved column x.
for x in unsolved:
x_d = self.cols.get(x)
# Find y such that x = y * col1
for y in self.names:
val = d1.get(y)
if val == x:
g.trace('col: %2s = %2s * %s' % (x, y, col1))
break
else:
            g.trace('cannot happen: no value for', x)
return
        # Compute all other values for column x using y:
        # w * x = w * (y * col1) = (w * y) * col1.
        for w in self.names:
            wy = self.rows[w].get(y)
            if wy in d1:
                x_d[w] = self.rows[w][x] = d1[wy]
    print('===== solved =====')
::
e a b b2 c c2 d d2 x y z
e a xa da za ba ya ca c2a d2a b2a
e e a b b2 c c2 d d2 x y z
a a a2 d2 ? ? ? ? ? c ? ?
a2 a2 e y ? ? ? ? ? b2 ? ?
b b c2 b2 ? ? ? ? ? d ? ?
b2 b2 y e ? ? ? ? ? a2 ? ?
c c d2 a2 ? ? ? ? ? a ? ?
c2 c2 x z ? ? ? ? ? d2 ? ?
d d b2 ? ? ? ? ? ? b ? ?
d2 d2 z ? ? ? ? ? ? c2 ? ?
x x b ? ? ? ? ? ? e ? ?
y y d ? ? ? ? ? ? z ? ?
z z c ? ? ? ? ? ? y ? ?
red arrow: a
blue arrow: x
# b column
b = xa
# X * b = X(xa) = (Xx)a
a * b = a(xa) = (ax)a = ca = d2
a2 * b = a2(xa) = (a2x)a = b2a = y
b * b = b(xa) = (bx)a = da = b2
b2 * b = b2(xa) = (b2x)a = a2a = e
c * b = c(xa) = (cx)a = aa = a2
c2 * b = c2(xa) = (c2x)a = d2a = z
d * b = d(xa) = (dx)a = ba = c2
# identity: e = a2a
# a2 column: a2 = aa
# b2 column: b2 = da
# c column: c = za
# c2 column: c2 = ba
# d column: d = ya
# d2 column: d2 = ca
# x column: x = c2a
# y column: y = b2a
# z column: z = d2a
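The same associativity trick in a self-contained sketch (plain dicts; both
products come from the knowns above, where a*x = c and c*a = d2):

mult = {('a', 'x'): 'c', ('c', 'a'): 'd2'}
def times(w, y):
    return mult[(w, y)]
# a*b = a*(xa) = (a*x)*a = c*a = d2
assert times(times('a', 'x'), 'a') == 'd2'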
g.cls()
import numpy as np
@others
i = 0
while i < len(group):
    a = group[i]
    for b in list(group):  # Iterate over a snapshot: group grows below.
        product = np.dot(a, b)
        for c in group:
            if np.array_equal(product, c):
                break
        else:
            # print('i: %s Added\n%s' % (i, product))
            group.append(product)
    i += 1
assert len(group) == 8, len(group)
# Print the group table:
print(' '*8 + ' '.join([row1(m) for m in group]))
print(' '*8 + ' '.join([row2(m) for m in group]))
print(' '*8 + '=' * 70)
for i in range(8):
row = group[i]
print('%s %s' % (
row1(row),
' '.join([row1(np.dot(row,m)) for m in group])))
print('%s %s' % (
row2(row),
' '.join([row2(np.dot(row,m)) for m in group])))
print('')
# Generators. Note: np.matrix is deprecated; plain 2-D arrays work with np.dot.
a0 = np.array((
    (1, 0),
    (0, 1),
))
a1 = np.array((
    (0, 1),
    (1, 0),
))
a2 = np.array((
    (1, 0),
    (0, -1),
))
group = [a0, a1, a2]
def row1(matrix):
return '[%2s %2s]' % (matrix[0,0], matrix[0,1])
def row2(matrix):
return '[%2s %2s]' % (matrix[1,0], matrix[1,1])
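Note: the eight matrices generated here form the symmetries of the square (the
dihedral group of order 8): a1 and a2 are reflections, and np.dot(a1, a2) is a
90-degree rotation.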
Hello World!
c:\test\JinjaTemplates\TestTemplate.txt
def teardown_script(self):
if self.auto_run:
# default wait.
self.wait(0.5)
World
def update_jinja(self, s, keywords):
pc = self
h = self.c.p.h
p = self.c.p
c = self.c
oldp = None
#print "try act"
if not h.startswith('@jinja'):
#print("Not a @jinja node")
return
    def find_root(p):
        for newp in p.parents():
            if newp.h.strip() == '@jinja':
                # Found the @jinja node: return (old position, @jinja node).
                return p, newp
        return None, p
    def find_inputs(p):
        for newp in p.parents():
            if newp.h.strip() == '@jinja inputs':
                _, root = find_root(newp)
                return p, root
        return None, p
# if on jinja node's children, find the parent
if h.strip() == '@jinja template' or h.strip() == '@jinja inputs':
# not at @jinja, find from parents
oldp, p = find_root(p)
elif h.startswith('@jinja variable'):
# not at @jinja, first find @jinja inputs, then @jinja
oldp, p = find_inputs(p)
def untangle(c,p):
return g.getScript(c,p,
useSelectedText=False,
useSentinels=False)
    template_data = {}
    template_path = None
for child in p.children():
if child.h == '@jinja template':
template_path = g.os_path_finalize_join(c.getNodePath(p), untangle(c, child).strip())
#print("template_path: ", template_path)
elif child.h == '@jinja inputs':
for template_var_node in child.children():
template_data[template_var_node.h.replace('@jinja variable', '').strip()] = untangle(c, template_var_node).strip()
#print("template_data: ", template_data)
    if not template_path:
        g.es("No template path given. Your @jinja node should have a child "
            "'@jinja template' whose body holds the path to the template "
            "(relative or absolute)")
        return
#print "act"
tmpl = Template(Path(template_path).read_text())
out = tmpl.render(template_data)
w = pc.ensure_text_widget()
pc.show()
w.setPlainText(out)
p.b = out
c.redraw(p)
# focus back on entry node
if oldp:
c.redraw(oldp)
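For reference, a minimal sketch of the outline shape update_jinja expects
(headlines only; the '@jinja template' body holds the template path, and each
'@jinja variable NAME' body holds that variable's value):

@jinja
    @jinja template
    @jinja inputs
        @jinja variable name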
from jinja2 import Template
from markupsafe import escape
# template = Template('Hello {{ name }}!')
path = r'c:\test\JinjaTemplates\TestTemplate.txt'
with open(path, 'r') as f:
    s = f.read()
template = Template(s)
s = template.render(name='John Doe')
print(s)
print(escape('<em>Hello</em>'))
class BaseCellExporter:
"""A class that represent a notebook cell as text"""
@others
default_comment_magics = None
parse_cell_language = True
def __init__(self, cell, default_language, fmt=None, unsupported_keys=None):
self.fmt = fmt or {}
self.ext = self.fmt.get("extension")
self.cell_type = cell.cell_type
self.source = cell_source(cell)
self.unfiltered_metadata = cell.metadata
self.metadata = filter_metadata(
cell.metadata,
self.fmt.get("cell_metadata_filter"),
_IGNORE_CELL_METADATA,
unsupported_keys=unsupported_keys,
)
if self.parse_cell_language:
custom_cell_magics = self.fmt.get("custom_cell_magics", "").split(",")
self.language, magic_args = cell_language(
self.source, default_language, custom_cell_magics
)
if magic_args:
self.metadata["magic_args"] = magic_args
else:
self.language = None
if self.language and not self.ext.endswith(".Rmd"):
self.metadata["language"] = self.language
self.language = self.language or cell.metadata.get("language", default_language)
self.default_language = default_language
self.comment = _SCRIPT_EXTENSIONS.get(self.ext, {}).get("comment", "#")
self.comment_suffix = _SCRIPT_EXTENSIONS.get(self.ext, {}).get(
"comment_suffix", ""
)
self.comment_magics = self.fmt.get(
"comment_magics", self.default_comment_magics
)
self.cell_metadata_json = self.fmt.get("cell_metadata_json", False)
self.use_runtools = self.fmt.get("use_runtools", False)
self.doxygen_equation_markers = self.fmt.get("doxygen_equation_markers", False)
# how many blank lines before next cell
self.lines_to_next_cell = cell.metadata.get("lines_to_next_cell")
self.lines_to_end_of_cell_marker = cell.metadata.get(
"lines_to_end_of_cell_marker"
)
if (
cell.cell_type == "raw"
and "active" not in self.metadata
and not any(
tag.startswith("active-") for tag in self.metadata.get("tags", [])
)
):
self.metadata["active"] = ""
def is_code(self):
"""Is this cell a code cell?"""
if self.cell_type == "code":
return True
if (
self.cell_type == "raw"
and "active" in self.metadata
or any(tag.startswith("active-") for tag in self.metadata.get("tags", []))
):
return True
return False
def use_triple_quotes(self):
"""Should this markdown cell use triple quote?"""
if "cell_marker" not in self.unfiltered_metadata:
return False
cell_marker = self.unfiltered_metadata["cell_marker"]
if cell_marker in ['"""', "'''"]:
return True
if "," not in cell_marker:
return False
left, right = cell_marker.split(",")
return left[:3] == right[-3:] and left[:3] in ['"""', "'''"]
def cell_to_text(self):
"""Return the text representation for the cell"""
# Trigger cell marker in case we are using multiline quotes
if self.cell_type != "code" and not self.metadata and self.use_triple_quotes():
self.metadata["cell_type"] = self.cell_type
# Go notebooks have '%%' or '%% -' magic commands that need to be escaped
if self.default_language == "go" and self.language == "go":
self.source = [
re.sub(r"^(//\s*)*(%%\s*$|%%\s+-.*$)", r"\1//gonb:\2", line)
for line in self.source
]
if self.is_code():
return self.code_to_text()
source = copy(self.source)
if not self.comment:
escape_code_start(source, self.ext, None)
return self.markdown_to_text(source)
def markdown_to_text(self, source):
"""Escape the given source, for a markdown cell"""
cell_markers = self.unfiltered_metadata.get(
"cell_marker", self.fmt.get("cell_markers")
)
if cell_markers:
if "," in cell_markers:
left, right = cell_markers.split(",", 1)
else:
left = cell_markers + "\n"
if cell_markers.startswith(("r", "R")):
cell_markers = cell_markers[1:]
right = "\n" + cell_markers
if (
left[:3] == right[-3:]
or (left[:1] in ["r", "R"] and left[1:4] == right[-3:])
) and right[-3:] in ['"""', "'''"]:
# Markdown cells that contain a backslash should be encoded as raw strings
if (
left[:1] not in ["r", "R"]
and "\\" in "\n".join(source)
and self.fmt.get("format_name") == "percent"
):
left = "r" + left
source = copy(source)
source[0] = left + source[0]
source[-1] = source[-1] + right
return source
if (
self.comment
and self.comment != "#'"
and is_active(self.ext, self.metadata)
and self.fmt.get("format_name") not in ["percent", "hydrogen"]
):
source = copy(source)
comment_magic(
source,
self.language,
self.comment_magics,
explicitly_code=self.cell_type == "code",
)
return comment_lines(source, self.comment, self.comment_suffix)
def code_to_text(self):
"""Return the text representation of this cell as a code cell"""
raise NotImplementedError("This method must be implemented in a sub-class")
def remove_eoc_marker(self, text, next_text):
"""Remove end-of-cell marker when possible"""
# pylint: disable=W0613,R0201
return text
class MarkdownCellExporter(BaseCellExporter):
"""A class that represent a notebook cell as Markdown"""
@others
default_comment_magics = False
cell_reader = MarkdownCellReader
def __init__(self, *args, **kwargs):
BaseCellExporter.__init__(self, *args, **kwargs)
self.comment = ""
@language jupytext
# %% [markdown]
# Loading and Analyzing Data
# ==========================
A **bold** word.
def html_comment(self, metadata, code="region"):
"""Protect a Markdown or Raw cell with HTML comments"""
if metadata:
region_start = [
"<!-- #" + code,
metadata_to_text(metadata, plain_json=self.cell_metadata_json),
"-->",
]
region_start = " ".join(region_start)
else:
region_start = f"<!-- #{code} -->"
return [region_start] + self.source + [f"<!-- #end{code} -->"]
def cell_to_text(self):
"""Return the text representation of a cell"""
if self.cell_type == "markdown":
if self.doxygen_equation_markers and self.cell_type == "markdown":
self.source = markdown_to_doxygen("\n".join(self.source)).splitlines()
# Is an explicit region required?
if self.metadata:
protect = True
else:
# Would the text be parsed to a shorter cell/a cell with a different type?
cell, pos = self.cell_reader(self.fmt).read(self.source)
protect = pos < len(self.source) or cell.cell_type != self.cell_type
if protect:
return self.html_comment(
self.metadata, self.metadata.pop("region_name", "region")
)
return self.source
return self.code_to_text()
def code_to_text(self):
"""Return the text representation of a code cell"""
source = copy(self.source)
comment_magic(source, self.language, self.comment_magics)
if self.metadata.get("active") == "":
self.metadata.pop("active")
self.language = self.metadata.pop("language", self.language)
if self.cell_type == "raw" and not is_active(self.ext, self.metadata, False):
return self.html_comment(self.metadata, "raw")
options = metadata_to_text(self.language, self.metadata)
code_cell_delimiter = three_backticks_or_more(self.source)
return [code_cell_delimiter + options] + source + [code_cell_delimiter]
class RMarkdownCellExporter(MarkdownCellExporter):
"""A class that represent a notebook cell as R Markdown"""
@others
default_comment_magics = True
cell_reader = RMarkdownCellReader
def __init__(self, *args, **kwargs):
MarkdownCellExporter.__init__(self, *args, **kwargs)
self.ext = ".Rmd"
self.comment = ""
def code_to_text(self):
"""Return the text representation of a code cell"""
active = is_active(self.ext, self.metadata)
source = copy(self.source)
if active:
comment_magic(source, self.language, self.comment_magics)
lines = []
if not is_active(self.ext, self.metadata):
self.metadata["eval"] = False
options = metadata_to_rmd_options(
self.language, self.metadata, self.use_runtools
)
lines.append(f"```{{{options}}}")
lines.extend(source)
lines.append("```")
return lines
def endofcell_marker(source, comment):
"""Issues #31 #38: does the cell contain a blank line? In that case
we add an end-of-cell marker"""
endofcell = "-"
while True:
endofcell_re = re.compile(rf"^{re.escape(comment)}( )" + endofcell + r"\s*$")
if list(filter(endofcell_re.match, source)):
endofcell = endofcell + "-"
else:
return endofcell
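A minimal sketch of the behavior (illustrative inputs): a cell that already
contains a '# -' line forces a longer end-of-cell marker.

assert endofcell_marker(['x = 1'], '#') == '-'
assert endofcell_marker(['x = 1', '# -'], '#') == '--'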
class LightScriptCellExporter(BaseCellExporter):
"""A class that represent a notebook cell as a Python or Julia script"""
@others
default_comment_magics = True
use_cell_markers = True
cell_marker_start = None
cell_marker_end = None
def __init__(self, *args, **kwargs):
BaseCellExporter.__init__(self, *args, **kwargs)
if "cell_markers" in self.fmt:
if "," not in self.fmt["cell_markers"]:
warnings.warn(
"Ignored cell markers '{}' as it does not match the expected 'start,end' pattern".format(
self.fmt.pop("cell_markers")
)
)
elif self.fmt["cell_markers"] != "+,-":
self.cell_marker_start, self.cell_marker_end = self.fmt[
"cell_markers"
].split(",", 1)
for key in ["endofcell"]:
if key in self.unfiltered_metadata:
self.metadata[key] = self.unfiltered_metadata[key]
def is_code(self):
# Treat markdown cells with metadata as code cells (#66)
if (self.cell_type == "markdown" and self.metadata) or self.use_triple_quotes():
if is_active(self.ext, self.metadata):
self.metadata["cell_type"] = self.cell_type
self.source = self.markdown_to_text(self.source)
self.cell_type = "code"
self.unfiltered_metadata = copy(self.unfiltered_metadata)
self.unfiltered_metadata.pop("cell_marker", "")
return True
return super().is_code()
@language jupytext
# %%
def spam(s: str) -> str:
"""A docstring a"""
return '1234'
def code_to_text(self):
"""Return the text representation of a code cell"""
active = is_active(
self.ext, self.metadata, same_language(self.language, self.default_language)
)
source = copy(self.source)
escape_code_start(source, self.ext, self.language)
comment_questions = self.metadata.pop("comment_questions", True)
if active:
comment_magic(source, self.language, self.comment_magics, comment_questions)
else:
source = self.markdown_to_text(source)
if (
active
and comment_questions
and need_explicit_marker(self.source, self.language, self.comment_magics)
) or self.explicit_start_marker(source):
self.metadata["endofcell"] = self.cell_marker_end or endofcell_marker(
source, self.comment
)
if not self.metadata or not self.use_cell_markers:
return source
lines = []
endofcell = self.metadata["endofcell"]
if endofcell == "-" or self.cell_marker_end:
del self.metadata["endofcell"]
cell_start = [self.comment, self.cell_marker_start or "+"]
options = metadata_to_double_percent_options(
self.metadata, self.cell_metadata_json
)
if options:
cell_start.append(options)
lines.append(" ".join(cell_start))
lines.extend(source)
lines.append(self.comment + f" {endofcell}")
return lines
def explicit_start_marker(self, source):
"""Does the python representation of this cell requires an explicit
start of cell marker?"""
if not self.use_cell_markers:
return False
if self.metadata:
return True
if self.cell_marker_start:
start_code_re = re.compile(
"^" + self.comment + r"\s*" + self.cell_marker_start + r"\s*(.*)$"
)
end_code_re = re.compile(
"^" + self.comment + r"\s*" + self.cell_marker_end + r"\s*$"
)
if start_code_re.match(source[0]) or end_code_re.match(source[0]):
return False
if all([line.startswith(self.comment) for line in self.source]):
return True
if LightScriptCellReader(self.fmt).read(source)[1] < len(source):
return True
return False
def remove_eoc_marker(self, text, next_text):
"""Remove end of cell marker when next cell has an explicit start marker"""
if self.cell_marker_start:
return text
if self.is_code() and text[-1] == self.comment + " -":
# remove end of cell marker when redundant with next explicit marker
if not next_text or next_text[0].startswith(self.comment + " +"):
text = text[:-1]
# When we do not need the end of cell marker, number of blank lines is the max
# between that required at the end of the cell, and that required before the next cell.
if self.lines_to_end_of_cell_marker and (
self.lines_to_next_cell is None
or self.lines_to_end_of_cell_marker > self.lines_to_next_cell
):
self.lines_to_next_cell = self.lines_to_end_of_cell_marker
else:
# Insert blank lines at the end of the cell
blank_lines = self.lines_to_end_of_cell_marker
if blank_lines is None:
# two blank lines when required by pep8
blank_lines = pep8_lines_between_cells(
text[:-1], next_text, self.ext
)
blank_lines = 0 if blank_lines < 2 else 2
text = text[:-1] + [""] * blank_lines + text[-1:]
return text
class BareScriptCellExporter(LightScriptCellExporter):
"""A class that writes notebook cells as scripts with no cell markers"""
use_cell_markers = False
class RScriptCellExporter(BaseCellExporter):
"""A class that can represent a notebook cell as a R script"""
@others
default_comment_magics = True
def __init__(self, *args, **kwargs):
BaseCellExporter.__init__(self, *args, **kwargs)
self.comment = "#'"
def code_to_text(self):
"""Return the text representation of a code cell"""
active = is_active(self.ext, self.metadata)
source = copy(self.source)
escape_code_start(source, self.ext, self.language)
if active:
comment_magic(source, self.language, self.comment_magics)
if not active:
source = ["# " + line if line else "#" for line in source]
lines = []
if not is_active(self.ext, self.metadata):
self.metadata["eval"] = False
options = metadata_to_rmd_options(None, self.metadata, self.use_runtools)
if options:
lines.append(f"#+ {options}")
lines.extend(source)
return lines
class DoublePercentCellExporter(BaseCellExporter): # pylint: disable=W0223
"""A class that can represent a notebook cell as a Spyder/VScode script (#59)"""
@others
default_comment_magics = True
parse_cell_language = True
def __init__(self, *args, **kwargs):
BaseCellExporter.__init__(self, *args, **kwargs)
self.cell_markers = self.fmt.get("cell_markers")
def cell_to_text(self):
"""Return the text representation for the cell"""
# Go notebooks have '%%' or '%% -' magic commands that need to be escaped
if self.default_language == "go" and self.language == "go":
self.source = [
re.sub(r"^(//\s*)*(%%\s*$|%%\s+-.*$)", r"\1//gonb:\2", line)
for line in self.source
]
active = is_active(
self.ext, self.metadata, same_language(self.language, self.default_language)
)
if (
self.cell_type == "raw"
and "active" in self.metadata
and self.metadata["active"] == ""
):
del self.metadata["active"]
if not self.is_code():
self.metadata["cell_type"] = self.cell_type
options = metadata_to_double_percent_options(
self.metadata, self.cell_metadata_json
)
indent = ""
if self.is_code() and active and self.source:
first_line = self.source[0]
if first_line.strip():
left_space = re.compile(r"^(\s*)").match(first_line)
if left_space:
indent = left_space.groups()[0]
if options.startswith("%") or not options:
lines = comment_lines(
["%%" + options], indent + self.comment, self.comment_suffix
)
else:
lines = comment_lines(
["%% " + options], indent + self.comment, self.comment_suffix
)
if self.is_code() and active:
source = copy(self.source)
comment_magic(source, self.language, self.comment_magics)
if source == [""]:
return lines
return lines + source
return lines + self.markdown_to_text(self.source)
@language jupytext
@others # test.
@language python
@others # test2.
class HydrogenCellExporter(DoublePercentCellExporter): # pylint: disable=W0223
"""A class that can represent a notebook cell as a Hydrogen script (#59)"""
default_comment_magics = False
parse_cell_language = False
class SphinxGalleryCellExporter(BaseCellExporter): # pylint: disable=W0223
"""A class that can represent a notebook cell as a
Sphinx Gallery script (#80)"""
@others
default_cell_marker = "#" * 79
default_comment_magics = True
def __init__(self, *args, **kwargs):
BaseCellExporter.__init__(self, *args, **kwargs)
self.comment = "#"
for key in ["cell_marker"]:
if key in self.unfiltered_metadata:
self.metadata[key] = self.unfiltered_metadata[key]
if self.fmt.get("rst2md"):
raise ValueError(
"The 'rst2md' option is a read only option. The reverse conversion is not "
"implemented. Please either deactivate the option, or save to another format."
) # pragma: no cover
def cell_to_text(self):
"""Return the text representation for the cell"""
if self.cell_type == "code":
source = copy(self.source)
return comment_magic(source, self.language, self.comment_magics)
if "cell_marker" in self.metadata:
cell_marker = self.metadata.pop("cell_marker")
else:
cell_marker = self.default_cell_marker
if self.source == [""]:
return [cell_marker] if cell_marker in ['""', "''"] else ['""']
if cell_marker in ['"""', "'''"]:
return [cell_marker] + self.source + [cell_marker]
return [
cell_marker
if cell_marker.startswith("#" * 20)
else self.default_cell_marker
] + comment_lines(self.source, self.comment, self.comment_suffix)
@language md
@wrap
@nosearch
https://github.com/leo-editor/leo-editor/issues/4146
PR: https://github.com/leo-editor/leo-editor/pull/4147
# Section
## Section
@language python
def spam():
if
"""Combine source and outputs from two notebooks
"""
import re
from copy import copy
from nbformat import NotebookNode
from .cell_metadata import _IGNORE_CELL_METADATA
from .formats import long_form_one_format
from .header import _DEFAULT_NOTEBOOK_METADATA
from .metadata_filter import restore_filtered_metadata
_BLANK_LINE = re.compile(r"^\s*$")
@others
@language python
@tabwidth -4
def black_invariant(text, chars=None):
"""Remove characters that may be changed when reformatting the text with black"""
if chars is None:
chars = [" ", "\t", "\n", ",", "'", '"', "(", ")", "\\"]
for char in chars:
text = text.replace(char, "")
return text
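A minimal sketch (illustrative inputs): two formattings of the same call
collapse to the same key.

assert black_invariant('f(a, b)') == black_invariant('f( a,b )') == 'fab'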
def same_content(ref, test, endswith=False):
"""Is the content of two cells the same, up to reformatting by black"""
ref = black_invariant(ref)
test = black_invariant(test)
if endswith and test:
return ref.endswith(test)
return ref == test
def combine_inputs_with_outputs(nb_source, nb_outputs, fmt=None):
"""Return a notebook that combines the text and metadata from the first notebook,
with the outputs and metadata of the second notebook."""
# nbformat version number taken from the notebook with outputs
assert (
nb_outputs.nbformat == nb_source.nbformat
), "The notebook with outputs is in format {}.{}, please upgrade it to {}.x".format(
nb_outputs.nbformat, nb_outputs.nbformat_minor, nb_source.nbformat
)
nb_source.nbformat_minor = nb_outputs.nbformat_minor
fmt = long_form_one_format(fmt)
text_repr = nb_source.metadata.get("jupytext", {}).get("text_representation", {})
ext = fmt.get("extension") or text_repr.get("extension")
format_name = fmt.get("format_name") or text_repr.get("format_name")
notebook_metadata_filter = nb_source.metadata.get("jupytext", {}).get(
"notebook_metadata_filter"
)
if notebook_metadata_filter == "-all":
nb_metadata = nb_outputs.metadata
else:
nb_metadata = restore_filtered_metadata(
nb_source.metadata,
nb_outputs.metadata,
notebook_metadata_filter,
_DEFAULT_NOTEBOOK_METADATA,
)
source_is_md_version_one = (
ext in [".md", ".markdown", ".Rmd"] and text_repr.get("format_version") == "1.0"
)
if nb_metadata.get("jupytext", {}).get("formats") or ext in [
".md",
".markdown",
".Rmd",
]:
nb_metadata.get("jupytext", {}).pop("text_representation", None)
if not nb_metadata.get("jupytext", {}):
nb_metadata.pop("jupytext", {})
if format_name in ["nomarker", "sphinx"] or source_is_md_version_one:
cell_metadata_filter = "-all"
else:
cell_metadata_filter = nb_metadata.get("jupytext", {}).get(
"cell_metadata_filter"
)
outputs_map = map_outputs_to_inputs(nb_source.cells, nb_outputs.cells)
cells = []
for source_cell, j in zip(nb_source.cells, outputs_map):
if j is None:
cells.append(source_cell)
continue
output_cell = nb_outputs.cells[j]
# Outputs and optional attributes are taken from the notebook with outputs
cell = copy(output_cell)
# Cell text is taken from the source notebook
cell.source = source_cell.source
# We also restore the cell metadata that has been filtered
cell.metadata = restore_filtered_metadata(
source_cell.metadata,
output_cell.metadata,
# The 'spin' format does not allow metadata on non-code cells
"-all"
if format_name == "spin" and source_cell.cell_type != "code"
else cell_metadata_filter,
_IGNORE_CELL_METADATA,
)
cells.append(cell)
# We call NotebookNode rather than new_notebook as we don't want to validate
# the notebook (some of the notebook in the collection of test notebooks
# do have some invalid properties - probably inherited from an older version
# of the notebook format).
return NotebookNode(
cells=cells,
metadata=nb_metadata,
nbformat=nb_outputs.nbformat,
nbformat_minor=nb_outputs.nbformat_minor,
)
@killcolor
@language jupytext
# %% [markdown]
# Loading and Analyzing Data
# ==========================
A **bold** word.
# %%
import pandas as pd # A comment
def spam(s: str) -> str:
return 'hi123'
# %% [markdown]
*Italics* and **bold**.
def map_outputs_to_inputs(cells_inputs, cells_outputs):
"""Returns a map i->(j or None) that maps the cells with outputs to the input cells"""
n_in = len(cells_inputs)
n_out = len(cells_outputs)
outputs_map = [None] * n_in
# First rule: match based on cell type, content, in increasing order, for each cell type
first_unmatched_output_per_cell_type = {}
for i in range(n_in):
cell_input = cells_inputs[i]
for j in range(
first_unmatched_output_per_cell_type.get(cell_input.cell_type, 0), n_out
):
cell_output = cells_outputs[j]
if cell_input.cell_type == cell_output.cell_type and same_content(
cell_input.source, cell_output.source
):
outputs_map[i] = j
first_unmatched_output_per_cell_type[cell_input.cell_type] = j + 1
break
# Second rule: match unused outputs based on cell type and content
# Third rule: is the new cell the final part of a previous cell with outputs?
unused_ouputs = set(range(n_out)).difference(outputs_map)
for endswith in [False, True]:
if not unused_ouputs:
return outputs_map
for i in range(n_in):
if outputs_map[i] is not None:
continue
cell_input = cells_inputs[i]
for j in unused_ouputs:
cell_output = cells_outputs[j]
if cell_input.cell_type == cell_output.cell_type and same_content(
cell_output.source, cell_input.source, endswith
):
outputs_map[i] = j
unused_ouputs.remove(j)
break
# Fourth rule: match based on increasing index (and cell type) for non-empty cells
if not unused_ouputs:
return outputs_map
prev_j = -1
for i in range(n_in):
if outputs_map[i] is not None:
prev_j = outputs_map[i]
continue
j = prev_j + 1
if j not in unused_ouputs:
continue
cell_input = cells_inputs[i]
cell_output = cells_outputs[j]
if (
cell_input.cell_type == cell_output.cell_type
and cell_input.source.strip() != ""
):
outputs_map[i] = j
unused_ouputs.remove(j)
prev_j = j
return outputs_map
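A minimal sketch of the content-based matching (SimpleNamespace stands in for
notebook cells; only .cell_type and .source are accessed):

from types import SimpleNamespace as NS
inputs = [NS(cell_type='code', source='x=1'), NS(cell_type='markdown', source='hi')]
outputs = [NS(cell_type='markdown', source='hi'), NS(cell_type='code', source='x = 1')]
assert map_outputs_to_inputs(inputs, outputs) == [1, 0]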
"""Compare two Jupyter notebooks"""
import difflib
import json
import os
import re
from jupytext.paired_paths import full_path
from .cell_metadata import _IGNORE_CELL_METADATA
from .combine import combine_inputs_with_outputs
from .formats import check_auto_ext, long_form_one_format
from .header import _DEFAULT_NOTEBOOK_METADATA
from .jupytext import read, reads, write, writes
from .metadata_filter import filter_metadata
_BLANK_LINE = re.compile(r"^\s*$")
@others
@language python
@tabwidth -4
def _multilines(obj):
try:
lines = obj.splitlines()
return lines + [""] if obj.endswith("\n") else lines
except AttributeError:
# Remove the final blank space on Python 2.7
# return json.dumps(obj, indent=True, sort_keys=True).splitlines()
return [
line.rstrip()
for line in json.dumps(obj, indent=True, sort_keys=True).splitlines()
]
def compare(
actual, expected, actual_name="actual", expected_name="expected", return_diff=False
):
"""Compare two strings, lists or dict-like objects"""
if actual != expected:
diff = difflib.unified_diff(
_multilines(expected),
_multilines(actual),
expected_name,
actual_name,
lineterm="",
)
if expected_name == "" and actual_name == "":
diff = list(diff)[2:]
diff = "\n".join(diff)
if return_diff:
return diff
raise AssertionError("\n" + diff)
return "" if return_diff else None
def filtered_cell(cell, preserve_outputs, cell_metadata_filter):
"""Cell type, metadata and source from given cell"""
filtered = {
"cell_type": cell.cell_type,
"source": cell.source,
"metadata": filter_metadata(
cell.metadata, cell_metadata_filter, _IGNORE_CELL_METADATA
),
}
if preserve_outputs:
for key in ["execution_count", "outputs"]:
if key in cell:
filtered[key] = cell[key]
return filtered
def filtered_notebook_metadata(notebook, ignore_display_name=False):
"""Notebook metadata, filtered for metadata added by Jupytext itself"""
metadata = filter_metadata(
notebook.metadata,
notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter"),
_DEFAULT_NOTEBOOK_METADATA,
)
# The display name for the kernel might change (Quarto format on the CI)
if ignore_display_name:
metadata.get("kernelspec", {}).pop("display_name", None)
if "jupytext" in metadata:
del metadata["jupytext"]
return metadata
class NotebookDifference(Exception):
"""Report notebook differences"""
def same_content(ref_source, test_source, allow_removed_final_blank_line):
"""Is the content of two cells the same, except for an optional final blank line?"""
if ref_source == test_source:
return True
if not allow_removed_final_blank_line:
return False
# Is ref identical to test, plus one blank line?
ref_source = ref_source.splitlines()
test_source = test_source.splitlines()
if not ref_source:
return False
if ref_source[:-1] != test_source:
return False
return _BLANK_LINE.match(ref_source[-1])
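A minimal sketch (illustrative strings): a single removed final blank line is
tolerated only when allowed.

assert same_content('x = 1\n\n', 'x = 1', allow_removed_final_blank_line=True)
assert not same_content('x = 1\n\n', 'x = 1', allow_removed_final_blank_line=False)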
def compare_notebooks(
notebook_actual,
notebook_expected,
fmt=None,
allow_expected_differences=True,
raise_on_first_difference=True,
compare_outputs=False,
compare_ids=None,
):
"""Compare the two notebooks, and raise with a meaningful message
that explains the differences, if any"""
fmt = long_form_one_format(fmt)
format_name = fmt.get("format_name")
if (
format_name == "sphinx"
and notebook_actual.cells
and notebook_actual.cells[0].source == "%matplotlib inline"
):
notebook_actual.cells = notebook_actual.cells[1:]
if compare_ids is None:
compare_ids = compare_outputs
modified_cells, modified_cell_metadata = compare_cells(
notebook_actual.cells,
notebook_expected.cells,
raise_on_first_difference,
compare_outputs=compare_outputs,
compare_ids=compare_ids,
cell_metadata_filter=notebook_actual.get("jupytext", {}).get(
"cell_metadata_filter"
),
allow_missing_code_cell_metadata=(
allow_expected_differences and format_name == "sphinx"
),
allow_missing_markdown_cell_metadata=(
allow_expected_differences and format_name in ["sphinx", "spin"]
),
allow_filtered_cell_metadata=allow_expected_differences,
allow_removed_final_blank_line=allow_expected_differences,
)
# Compare notebook metadata
modified_metadata = False
try:
ignore_display_name = (
fmt.get("extension") == ".qmd" and allow_expected_differences
)
compare(
filtered_notebook_metadata(notebook_actual, ignore_display_name),
filtered_notebook_metadata(notebook_expected, ignore_display_name),
)
except AssertionError as error:
if raise_on_first_difference:
raise NotebookDifference(f"Notebook metadata differ: {str(error)}")
modified_metadata = True
error = []
if modified_cells:
error.append(
"Cells {} differ ({}/{})".format(
",".join([str(i) for i in modified_cells]),
len(modified_cells),
len(notebook_expected.cells),
)
)
if modified_cell_metadata:
error.append(
"Cell metadata '{}' differ".format(
"', '".join([str(i) for i in modified_cell_metadata])
)
)
if modified_metadata:
error.append("Notebook metadata differ")
if error:
raise NotebookDifference(" | ".join(error))
def compare_cells(
actual_cells,
expected_cells,
raise_on_first_difference=True,
compare_outputs=True,
compare_ids=True,
cell_metadata_filter=None,
allow_missing_code_cell_metadata=False,
allow_missing_markdown_cell_metadata=False,
allow_filtered_cell_metadata=False,
allow_removed_final_blank_line=False,
):
"""Compare two collection of notebook cells"""
test_cell_iter = iter(actual_cells)
modified_cells = set()
modified_cell_metadata = set()
for i, ref_cell in enumerate(expected_cells, 1):
try:
test_cell = next(test_cell_iter)
except StopIteration:
if raise_on_first_difference:
raise NotebookDifference(
"No cell corresponding to {} cell #{}:\n{}".format(
ref_cell.cell_type, i, ref_cell.source
)
)
modified_cells.update(range(i, len(expected_cells) + 1))
break
ref_lines = [
line for line in ref_cell.source.splitlines() if not _BLANK_LINE.match(line)
]
test_lines = []
# 1. test cell type
if ref_cell.cell_type != test_cell.cell_type:
if raise_on_first_difference:
raise NotebookDifference(
"Unexpected cell type '{}' for {} cell #{}:\n{}".format(
test_cell.cell_type, ref_cell.cell_type, i, ref_cell.source
)
)
modified_cells.add(i)
# Compare cell ids (introduced in nbformat 5.1.0)
if compare_ids and test_cell.get("id") != ref_cell.get("id"):
if raise_on_first_difference:
raise NotebookDifference(
f"Cell ids differ on {test_cell['cell_type']} cell #{i}: "
f"'{test_cell.get('id')}' != '{ref_cell.get('id')}'"
)
modified_cells.add(i)
# 2. test cell metadata
if (ref_cell.cell_type == "code" and not allow_missing_code_cell_metadata) or (
ref_cell.cell_type != "code" and not allow_missing_markdown_cell_metadata
):
ref_metadata = ref_cell.metadata
test_metadata = test_cell.metadata
if allow_filtered_cell_metadata:
ref_metadata = {
key: ref_metadata[key]
for key in ref_metadata
if key not in _IGNORE_CELL_METADATA
}
test_metadata = {
key: test_metadata[key]
for key in test_metadata
if key not in _IGNORE_CELL_METADATA
}
if ref_metadata != test_metadata:
if raise_on_first_difference:
try:
compare(test_metadata, ref_metadata)
except AssertionError as error:
raise NotebookDifference(
"Metadata differ on {} cell #{}: {}\nCell content:\n{}".format(
test_cell.cell_type, i, str(error), ref_cell.source
)
)
else:
modified_cell_metadata.update(
set(test_metadata).difference(ref_metadata)
)
modified_cell_metadata.update(
set(ref_metadata).difference(test_metadata)
)
for key in set(ref_metadata).intersection(test_metadata):
if ref_metadata[key] != test_metadata[key]:
modified_cell_metadata.add(key)
test_lines.extend(
[
line
for line in test_cell.source.splitlines()
if not _BLANK_LINE.match(line)
]
)
# 3. test cell content
if ref_lines != test_lines:
if raise_on_first_difference:
try:
compare("\n".join(test_lines), "\n".join(ref_lines))
except AssertionError as error:
raise NotebookDifference(
"Cell content differ on {} cell #{}: {}".format(
test_cell.cell_type, i, str(error)
)
)
else:
modified_cells.add(i)
# 3. bis test entire cell content
if not same_content(
ref_cell.source, test_cell.source, allow_removed_final_blank_line
):
if ref_cell.source != test_cell.source:
if raise_on_first_difference:
diff = compare(test_cell.source, ref_cell.source, return_diff=True)
raise NotebookDifference(
"Cell content differ on {} cell #{}: {}".format(
test_cell.cell_type, i, diff
)
)
modified_cells.add(i)
if not compare_outputs:
continue
if ref_cell.cell_type != "code":
continue
ref_cell = filtered_cell(
ref_cell,
preserve_outputs=compare_outputs,
cell_metadata_filter=cell_metadata_filter,
)
test_cell = filtered_cell(
test_cell,
preserve_outputs=compare_outputs,
cell_metadata_filter=cell_metadata_filter,
)
try:
compare(test_cell, ref_cell)
except AssertionError as error:
if raise_on_first_difference:
raise NotebookDifference(
"Cell outputs differ on {} cell #{}: {}".format(
test_cell["cell_type"], i, str(error)
)
)
modified_cells.add(i)
# More cells in the actual notebook?
remaining_cell_count = 0
while True:
try:
test_cell = next(test_cell_iter)
if raise_on_first_difference:
raise NotebookDifference(
"Additional {} cell: {}".format(
test_cell.cell_type, test_cell.source
)
)
remaining_cell_count += 1
except StopIteration:
break
if remaining_cell_count and not raise_on_first_difference:
modified_cells.update(
range(
len(expected_cells) + 1,
len(expected_cells) + 1 + remaining_cell_count,
)
)
return modified_cells, modified_cell_metadata
# @killcolor
@nocolor
@language python
def spam():
pass
@color
def spam2():
pass
def test_round_trip_conversion(
notebook, fmt, update, allow_expected_differences=True, stop_on_first_error=True
):
"""Test round trip conversion for a Jupyter notebook"""
text = writes(notebook, fmt)
round_trip = reads(text, fmt)
if update:
round_trip = combine_inputs_with_outputs(round_trip, notebook, fmt)
compare_notebooks(
round_trip,
notebook,
fmt,
allow_expected_differences,
raise_on_first_difference=stop_on_first_error,
)
# The functions below are used in the Jupytext text collection
def create_mirror_file_if_missing(mirror_file, notebook, fmt):
if not os.path.isfile(mirror_file):
write(notebook, mirror_file, fmt=fmt)
def assert_conversion_same_as_mirror(nb_file, fmt, mirror_name, compare_notebook=False):
"""This function is used in the tests"""
dirname, basename = os.path.split(nb_file)
file_name, org_ext = os.path.splitext(basename)
fmt = long_form_one_format(fmt)
notebook = read(nb_file, fmt=fmt)
fmt = check_auto_ext(fmt, notebook.metadata, "")
ext = fmt["extension"]
mirror_file = os.path.join(
dirname, "..", "..", "outputs", mirror_name, full_path(file_name, fmt)
)
# it's better not to have Jupytext metadata in test notebooks:
if fmt == "ipynb" and "jupytext" in notebook.metadata: # pragma: no cover
notebook.metadata.pop("jupytext")
        write(notebook, nb_file, fmt=fmt)
create_mirror_file_if_missing(mirror_file, notebook, fmt)
# Compare the text representation of the two notebooks
if compare_notebook:
# Read and convert the mirror file to the latest nbformat version if necessary
nb_mirror = read(mirror_file, as_version=notebook.nbformat)
nb_mirror.nbformat_minor = notebook.nbformat_minor
compare_notebooks(nb_mirror, notebook)
return
elif ext == ".ipynb":
notebook = read(mirror_file)
fmt.update({"extension": org_ext})
actual = writes(notebook, fmt)
with open(nb_file, encoding="utf-8") as fp:
expected = fp.read()
else:
actual = writes(notebook, fmt)
with open(mirror_file, encoding="utf-8") as fp:
expected = fp.read()
if not actual.endswith("\n"):
actual = actual + "\n"
compare(actual, expected)
# Compare the two notebooks
if ext != ".ipynb":
notebook = read(nb_file)
nb_mirror = read(mirror_file, fmt=fmt)
if fmt.get("format_name") == "sphinx":
nb_mirror.cells = nb_mirror.cells[1:]
for cell in notebook.cells:
cell.metadata = {}
for cell in nb_mirror.cells:
cell.metadata = {}
compare_notebooks(nb_mirror, notebook, fmt)
nb_mirror = combine_inputs_with_outputs(nb_mirror, notebook)
compare_notebooks(nb_mirror, notebook, fmt, compare_outputs=True)
def notebook_model(nb):
"""Return a notebook model, with content a
dictionary rather than a notebook object.
To be used in tests only."""
return dict(type="notebook", content=json.loads(json.dumps(nb)))
"""Find and read Jupytext configuration files"""
import json
import os
try:
import tomllib
except ImportError:
import tomli as tomllib
import warnings
import yaml
from traitlets import Bool, Dict, Enum, Float, List, Unicode, Union
from traitlets.config import Configurable
from traitlets.config.loader import PyFileConfigLoader
from traitlets.traitlets import TraitError
from .formats import (
NOTEBOOK_EXTENSIONS,
long_form_multiple_formats,
long_form_one_format,
short_form_multiple_formats,
)
@others
@language python
@tabwidth -4
class JupytextConfigurationError(ValueError):
"""Error in the specification of the format for the text notebook"""
JUPYTEXT_CONFIG_FILES = [
"jupytext",
"jupytext.toml",
"jupytext.yml",
"jupytext.yaml",
"jupytext.json",
]
JUPYTEXT_CONFIG_FILES.extend(
["." + filename for filename in JUPYTEXT_CONFIG_FILES] + [".jupytext.py"]
)
PYPROJECT_FILE = "pyproject.toml"
JUPYTEXT_CEILING_DIRECTORIES = [
path
for path in os.environ.get("JUPYTEXT_CEILING_DIRECTORIES", "").split(":")
if path
]
class JupytextConfiguration(Configurable):
"""Jupytext Configuration's options"""
@others
formats = Union(
[Unicode(), List(Unicode()), Dict(Unicode())],
help="Save notebooks to these file extensions. "
"Can be any of ipynb,Rmd,md,jl,py,R,nb.jl,nb.py,nb.R "
"comma separated. If you want another format than the "
"default one, append the format name to the extension, "
"e.g. ipynb,py:percent to save the notebook to "
"hydrogen/spyder/vscode compatible scripts",
config=True,
)
default_jupytext_formats = Unicode(
help="Deprecated. Use 'formats' instead", config=True
)
preferred_jupytext_formats_save = Unicode(
help="Preferred format when saving notebooks as text, per extension. "
'Use "jl:percent,py:percent,R:percent" if you want to save '
"Julia, Python and R scripts in the double percent format and "
'only write "jupytext_formats": "py" in the notebook metadata.',
config=True,
)
preferred_jupytext_formats_read = Unicode(
help="Preferred format when reading notebooks from text, per "
'extension. Use "py:sphinx" if you want to read all python '
"scripts as Sphinx gallery scripts.",
config=True,
)
notebook_metadata_filter = Unicode(
help="Notebook metadata that should be save in the text representations. "
"Examples: 'all', '-all', 'widgets,nteract', 'kernelspec,jupytext-all'",
config=True,
)
default_notebook_metadata_filter = Unicode(
"", help="Deprecated. Use 'notebook_metadata_filter' instead", config=True
)
hide_notebook_metadata = Enum(
values=[True, False],
allow_none=True,
help="Should the notebook metadata be wrapped into an HTML comment in the Markdown format?",
config=True,
)
root_level_metadata_as_raw_cell = Bool(
True,
help="Should the root level metadata of text documents (like the fields 'title' or 'author' in "
"R Markdown document) appear as a raw cell in the notebook (True), or go to the notebook"
"metadata?",
config=True,
)
cell_metadata_filter = Unicode(
help="Cell metadata that should be saved in the text representations. "
"Examples: 'all', 'hide_input,hide_output'",
config=True,
)
default_cell_metadata_filter = Unicode(
"", help="Deprecated. Use 'cell_metadata_filter' instead", config=True
)
comment_magics = Enum(
values=[True, False],
allow_none=True,
help="Should Jupyter magic commands be commented out in the text representation?",
config=True,
)
split_at_heading = Bool(
False,
help="Split markdown cells on headings (Markdown and R Markdown formats only)",
config=True,
)
sphinx_convert_rst2md = Bool(
False,
help="When opening a Sphinx Gallery script, convert the reStructuredText to markdown",
config=True,
)
doxygen_equation_markers = Bool(
False,
help="Should equation markers use the DOxygen format? "
"(see https://github.com/mwouts/jupytext/issues/517)",
config=True,
)
outdated_text_notebook_margin = Float(
1.0,
help="Refuse to overwrite inputs of a ipynb notebooks with those of a "
"text notebook when the text notebook plus margin is older than "
"the ipynb notebook (NB: This option is ignored by Jupytext CLI)",
config=True,
)
cm_config_log_level = Enum(
values=["warning", "info", "info_if_changed", "debug", "none"],
default_value="info_if_changed",
help="The log level for config file logs in the Jupytext contents manager",
config=True,
)
cell_markers = Unicode(
help='Start and end cell markers for the light format, comma separated. Use "{{{,}}}" to mark cells'
'as foldable regions in Vim, and "region,endregion" to mark cells as Vscode/PyCharm regions',
config=True,
)
default_cell_markers = Unicode(
help="Deprecated. Use 'cell_markers' instead", config=True
)
notebook_extensions = Union(
[List(Unicode(), NOTEBOOK_EXTENSIONS), Unicode()],
help="A list of notebook extensions",
config=True,
)
custom_cell_magics = Unicode(
help='A comma separated list of cell magics. Use e.g. custom_cell_magics = "configure,local" '
'if you want code cells starting with the Spark magic cell commands "configure" and "local" '
"to be commented out when converted to scripts.",
config=True,
)
def set_default_format_options(self, format_options, read=False):
"""Set default format option"""
if self.default_notebook_metadata_filter:
warnings.warn(
"The option 'default_notebook_metadata_filter' is deprecated. "
"Please use 'notebook_metadata_filter' instead.",
FutureWarning,
)
format_options.setdefault(
"notebook_metadata_filter", self.default_notebook_metadata_filter
)
if self.notebook_metadata_filter:
format_options.setdefault(
"notebook_metadata_filter", self.notebook_metadata_filter
)
if self.default_cell_metadata_filter:
warnings.warn(
"The option 'default_cell_metadata_filter' is deprecated. "
"Please use 'cell_metadata_filter' instead.",
FutureWarning,
)
format_options.setdefault(
"cell_metadata_filter", self.default_cell_metadata_filter
)
if self.cell_metadata_filter:
format_options.setdefault("cell_metadata_filter", self.cell_metadata_filter)
if self.hide_notebook_metadata is not None:
format_options.setdefault(
"hide_notebook_metadata", self.hide_notebook_metadata
)
if self.root_level_metadata_as_raw_cell is False:
format_options.setdefault(
"root_level_metadata_as_raw_cell", self.root_level_metadata_as_raw_cell
)
if self.comment_magics is not None:
format_options.setdefault("comment_magics", self.comment_magics)
if self.split_at_heading:
format_options.setdefault("split_at_heading", self.split_at_heading)
if self.doxygen_equation_markers:
format_options.setdefault(
"doxygen_equation_markers", self.doxygen_equation_markers
)
if not read:
if self.default_cell_markers:
warnings.warn(
"The option 'default_cell_markers' is deprecated. "
"Please use 'cell_markers' instead.",
FutureWarning,
)
format_options.setdefault("cell_markers", self.default_cell_markers)
if self.cell_markers:
format_options.setdefault("cell_markers", self.cell_markers)
if read and self.sphinx_convert_rst2md:
format_options.setdefault("rst2md", self.sphinx_convert_rst2md)
if self.custom_cell_magics:
format_options.setdefault("custom_cell_magics", self.custom_cell_magics)
def default_formats(self, path):
"""Return the default formats, if they apply to the current path #157"""
from .paired_paths import InconsistentPath, base_path
if self.default_jupytext_formats:
warnings.warn(
"The option 'default_jupytext_formats' is deprecated. "
"Please use 'formats' instead.",
FutureWarning,
)
formats = self.formats or self.default_jupytext_formats
for fmt in long_form_multiple_formats(formats):
try:
base_path(path, fmt)
return formats
except InconsistentPath:
continue
return None
def __eq__(self, other):
for key in self.class_trait_names():
if getattr(self, key) != getattr(other, key):
return False
return True
@language python
import pandas as pd # A comment
def spam(s: str) -> str:
return 'hi'
@language md
# Loading and Analyzing Data
# ==========================
*Italics* and **bold**
More markdown.
def preferred_format(incomplete_format, preferred_formats):
"""Return the preferred format for the given extension"""
incomplete_format = long_form_one_format(incomplete_format)
if "format_name" in incomplete_format:
return incomplete_format
for fmt in long_form_multiple_formats(preferred_formats):
if (
(
incomplete_format["extension"] == fmt["extension"]
or (
fmt["extension"] == ".auto"
and incomplete_format["extension"]
not in [".md", ".markdown", ".Rmd", ".ipynb"]
)
)
and incomplete_format.get("suffix")
== fmt.get("suffix", incomplete_format.get("suffix"))
and incomplete_format.get("prefix")
== fmt.get("prefix", incomplete_format.get("prefix"))
):
fmt.update(incomplete_format)
return fmt
return incomplete_format
def global_jupytext_configuration_directories():
"""Return the directories in which Jupytext will search for a configuration file"""
config_dirs = []
if "XDG_CONFIG_HOME" in os.environ:
config_dirs.extend(os.environ["XDG_CONFIG_HOME"].split(":"))
elif "USERPROFILE" in os.environ:
config_dirs.append(os.environ["USERPROFILE"])
elif "HOME" in os.environ:
config_dirs.append(os.path.join(os.environ["HOME"], ".config"))
config_dirs.append(os.environ["HOME"])
if "XDG_CONFIG_DIRS" in os.environ:
config_dirs.extend(os.environ["XDG_CONFIG_DIRS"].split(":"))
elif "ALLUSERSPROFILE" in os.environ:
config_dirs.append(os.environ["ALLUSERSPROFILE"])
else:
config_dirs.extend(["/usr/local/share/", "/usr/share/"])
for config_dir in config_dirs:
yield from [
os.path.join(config_dir, "jupytext"),
config_dir,
]
def find_global_jupytext_configuration_file():
"""Return the global Jupytext configuration file, if any"""
for config_dir in global_jupytext_configuration_directories():
config_file = find_jupytext_configuration_file(config_dir, False)
if config_file:
return config_file
return None
def find_jupytext_configuration_file(path, search_parent_dirs=True):
"""Return the first jupytext configuration file in the current directory, or any parent directory"""
if os.path.isdir(path):
for filename in JUPYTEXT_CONFIG_FILES:
full_path = os.path.join(path, filename)
if os.path.isfile(full_path):
return full_path
pyproject_path = os.path.join(path, PYPROJECT_FILE)
if os.path.isfile(pyproject_path):
with open(pyproject_path) as stream:
doc = tomllib.loads(stream.read())
if doc.get("tool", {}).get("jupytext") is not None:
return pyproject_path
if not search_parent_dirs:
return None
if JUPYTEXT_CEILING_DIRECTORIES and os.path.isdir(path):
for ceiling_dir in JUPYTEXT_CEILING_DIRECTORIES:
if os.path.isdir(ceiling_dir) and os.path.samefile(path, ceiling_dir):
return None
parent_dir = os.path.dirname(path)
if parent_dir == path:
return find_global_jupytext_configuration_file()
return find_jupytext_configuration_file(parent_dir)
def parse_jupytext_configuration_file(jupytext_config_file, stream=None):
"""Read a Jupytext config file, and return a dict"""
if not jupytext_config_file.endswith(".py") and stream is None:
with open(jupytext_config_file, encoding="utf-8") as stream:
return parse_jupytext_configuration_file(
jupytext_config_file, stream.read()
)
try:
if jupytext_config_file.endswith((".toml", "jupytext")):
doc = tomllib.loads(stream)
if jupytext_config_file.endswith(PYPROJECT_FILE):
return doc["tool"]["jupytext"]
else:
return doc
if jupytext_config_file.endswith((".yml", ".yaml")):
return yaml.safe_load(stream)
if jupytext_config_file.endswith(".json"):
return json.loads(stream)
return PyFileConfigLoader(jupytext_config_file).load_config()
except (ValueError, NameError) as err:
raise JupytextConfigurationError(
"The Jupytext configuration file {} is incorrect: {}".format(
jupytext_config_file, err
)
)
def load_jupytext_configuration_file(config_file, stream=None):
"""Read and validate a Jupytext configuration file, and return a JupytextConfiguration object"""
config_dict = parse_jupytext_configuration_file(config_file, stream)
config = validate_jupytext_configuration_file(config_file, config_dict)
# formats can be a dict prefix => format
if isinstance(config.formats, dict):
config.formats = [
fmt
if not prefix
else (prefix[:-1] if prefix.endswith("/") else prefix) + "///" + fmt
for prefix, fmt in config.formats.items()
]
config.formats = short_form_multiple_formats(config.formats)
if isinstance(config.notebook_extensions, str):
config.notebook_extensions = config.notebook_extensions.split(",")
return config
def load_jupytext_config(nb_file):
"""Return the jupytext configuration file in the same folder, or in a parent folder, of the current file, if any"""
config_file = find_jupytext_configuration_file(nb_file)
if config_file is None:
return None
if os.path.isfile(nb_file) and os.path.samefile(config_file, nb_file):
return None
return load_jupytext_configuration_file(config_file)
def validate_jupytext_configuration_file(config_file, config_dict):
"""Turn a dict-like config into a JupytextConfiguration object"""
if config_dict is None:
return None
try:
config = JupytextConfiguration(**config_dict)
except TraitError as err:
raise JupytextConfigurationError(
"The Jupytext configuration file {} is incorrect: {}".format(
config_file, err
)
)
invalid_options = set(config_dict).difference(dir(JupytextConfiguration()))
if any(invalid_options):
raise JupytextConfigurationError(
"The Jupytext configuration file {} is incorrect: options {} are not supported".format(
config_file, ",".join(invalid_options)
)
)
return config
def notebook_formats(nbk, config, path, fallback_on_current_fmt=True):
"""Return the list of formats for the current notebook"""
metadata = nbk.get("metadata")
jupytext_metadata = metadata.get("jupytext", {})
formats = (
jupytext_metadata.get("formats")
or metadata.get("jupytext_formats")
or (config.default_formats(path) if config else None)
)
if not formats:
if not fallback_on_current_fmt:
return None
text_repr = jupytext_metadata.get("text_representation", {})
ext = os.path.splitext(path)[1]
fmt = {"extension": ext}
if ext == text_repr.get("extension") and text_repr.get("format_name"):
fmt["format_name"] = text_repr.get("format_name")
formats = [fmt]
formats = long_form_multiple_formats(
formats, metadata, auto_ext_requires_language_info=False
)
# Set preferred formats if no format name has been given yet
if config:
formats = [
preferred_format(f, config.preferred_jupytext_formats_save) for f in formats
]
return formats
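# Study note (not part of jupytext): when neither the notebook metadata nor
# the config declare formats, the fallback above derives a single format from
# the path, e.g. "nb.py" yields [{"extension": ".py"}] (plus the recorded
# format_name when the text_representation metadata matches the extension).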
"""ContentsManager that allows to open Rmd, py, R and ipynb files as notebooks
"""
import itertools
import os
try:
import tomllib
except ImportError:
import tomli as tomllib
from collections import namedtuple
from datetime import timedelta
import nbformat
from tornado.web import HTTPError
# import notebook.transutils before notebook.services.contents.filemanager #75
try:
import notebook.transutils # noqa
except ImportError:
pass
from .config import (
JUPYTEXT_CONFIG_FILES,
PYPROJECT_FILE,
JupytextConfiguration,
JupytextConfigurationError,
find_global_jupytext_configuration_file,
load_jupytext_configuration_file,
notebook_formats,
preferred_format,
)
from .formats import (
long_form_multiple_formats,
short_form_multiple_formats,
short_form_one_format,
)
from .jupytext import drop_text_representation_metadata, reads, writes
from .kernels import set_kernelspec_from_language
from .paired_paths import (
InconsistentPath,
base_path,
find_base_path_and_format,
full_path,
paired_paths,
)
from .pairs import PairedFilesDiffer, latest_inputs_and_outputs, read_pair, write_pair
@others
return JupytextContentsManager
try:
# The LargeFileManager is taken by default from jupyter_server if available
from jupyter_server.services.contents.largefilemanager import LargeFileManager
TextFileContentsManager = build_jupytext_contents_manager_class(LargeFileManager)
except ImportError:
# If we can't find jupyter_server then we take it from notebook
try:
from notebook.services.contents.largefilemanager import LargeFileManager
TextFileContentsManager = build_jupytext_contents_manager_class(
LargeFileManager
)
except ImportError:
# Older versions of notebook do not have the LargeFileManager #217
from notebook.services.contents.filemanager import FileContentsManager
TextFileContentsManager = build_jupytext_contents_manager_class(
FileContentsManager
)
@language python
@tabwidth -4
@language cweb
% This is limbo in cweb mode... It should be in \LaTeX mode, not \c mode.
% The following should not be colorized: class,if,else.
@* this is a _cweb_ comment. Code is written in \c.
"strings" should not be colorized.
It should be colored in \LaTeX mode.
The following are not keywords in latex mode: if, else, etc.
Noweb section references are _valid_ in cweb comments!
< < section ref > >
< < missing ref > >
@c
and this is C code. // It is colored in \LaTeX mode by default.
/* This is a C block comment. It may also be colored in restricted \LaTeX mode. */
// Section refs are valid in code too, of course.
< < section ref > >
< < missing ref > >
\LaTeX and \c should not be colored.
if else, while, do // C keywords.
def build_jupytext_contents_manager_class(base_contents_manager_class):
@others
"""Derives a TextFileContentsManager class from the given base class"""
class JupytextContentsManager(base_contents_manager_class, JupytextConfiguration):
"""
A FileContentsManager class that reads and stores notebooks either as
classical Jupyter notebooks (.ipynb) or as R Markdown notebooks (.Rmd),
Julia (.jl), Python (.py) or R scripts (.R)
"""
@others
def __init__(self, *args, **kwargs):
# Dictionary: notebook path => (fmt, formats) where
# fmt is the current format, and formats the paired formats.
self.paired_notebooks = dict()
# Configuration cache, useful when notebooks are listed in a given directory
self.cached_config = namedtuple("cached_config", "path config_file config")
self.super = super()
self.super.__init__(*args, **kwargs)
def all_nb_extensions(self, config):
"""All extensions that should be classified as notebooks"""
return [
ext if ext.startswith(".") else "." + ext
for ext in config.notebook_extensions
]
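# Study note (not part of jupytext): this only normalizes bare extensions,
# e.g. notebook_extensions == ["ipynb", ".py"] -> [".ipynb", ".py"].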
def drop_paired_notebook(self, path):
"""Remove the current notebook from the list of paired notebooks"""
if path not in self.paired_notebooks:
return
fmt, formats = self.paired_notebooks.pop(path)
prev_paired_paths = paired_paths(path, fmt, formats)
for alt_path, _ in prev_paired_paths:
if alt_path in self.paired_notebooks:
self.drop_paired_notebook(alt_path)
def update_paired_notebooks(self, path, formats):
"""Update the list of paired notebooks to include/update the current pair"""
if not formats:
self.drop_paired_notebook(path)
return
formats = long_form_multiple_formats(formats)
_, fmt = find_base_path_and_format(path, formats)
new_paired_paths = paired_paths(path, fmt, formats)
for alt_path, _ in new_paired_paths:
self.drop_paired_notebook(alt_path)
if len(formats) == 1 and set(formats[0]) <= {"extension"}:
return
short_formats = short_form_multiple_formats(formats)
for alt_path, alt_fmt in new_paired_paths:
self.paired_notebooks[alt_path] = (
short_form_one_format(alt_fmt),
short_formats,
)
def create_prefix_dir(self, path, fmt):
"""Create the prefix dir, if missing"""
if "prefix" in fmt and "/" in path:
parent_dir = self.get_parent_dir(path)
if not self.dir_exists(parent_dir):
self.create_prefix_dir(parent_dir, fmt)
self.log.info("Creating directory %s", parent_dir)
self.super.save(dict(type="directory"), parent_dir)
def save(self, model, path=""):
"""Save the file model and return the model with no content."""
if model["type"] != "notebook":
return self.super.save(model, path)
path = path.strip("/")
nbk = model["content"]
try:
config = self.get_config(path)
jupytext_formats = notebook_formats(nbk, config, path)
self.update_paired_notebooks(path, jupytext_formats)
def save_one_file(path, fmt):
if "format_name" in fmt and fmt["extension"] not in [
".md",
".markdown",
".Rmd",
]:
self.log.info(
"Saving %s in format %s:%s",
os.path.basename(path),
fmt["extension"][1:],
fmt["format_name"],
)
else:
self.log.info("Saving %s", os.path.basename(path))
self.create_prefix_dir(path, fmt)
if fmt["extension"] == ".ipynb":
return self.super.save(
dict(
type="notebook",
content=drop_text_representation_metadata(
model["content"]
),
),
path,
)
if (
model["content"]["metadata"]
.get("jupytext", {})
.get("notebook_metadata_filter")
== "-all"
):
self.log.warning(
"Stripping metadata from {} as 'Include Metadata' is off "
"(toggle 'Include Metadata' in the Jupytext Menu or Commands if desired)".format(
path
)
)
text_model = dict(
type="file",
format="text",
content=writes(
nbformat.from_dict(model["content"]), fmt=fmt, config=config
),
)
return self.super.save(text_model, path)
return write_pair(path, jupytext_formats, save_one_file)
except Exception as e:
self.log.error("Error while saving file: %s %s", path, e, exc_info=True)
raise HTTPError(500, f"Unexpected error while saving file: {path} {e}")
def get(
self,
path,
content=True,
type=None,
format=None,
load_alternative_format=True,
):
"""Takes a path for an entity and returns its model"""
path = path.strip("/")
ext = os.path.splitext(path)[1]
# Not a notebook?
if (
not self.file_exists(path)
or self.dir_exists(path)
or (type is not None and type != "notebook")
):
return self.super.get(path, content, type, format)
config = self.get_config(path, use_cache=content is False)
if ext not in self.all_nb_extensions(config):
return self.super.get(path, content, type, format)
fmt = preferred_format(ext, config.preferred_jupytext_formats_read)
if ext == ".ipynb":
model = self.super.get(path, content, type="notebook", format=format)
else:
model = self.super.get(path, content, type="file", format="text")
model["type"] = "notebook"
if content:
# We may need to update these keys, inherited from text file formats
# Cf. https://github.com/mwouts/jupytext/issues/659
model["format"] = "json"
model["mimetype"] = None
try:
model["content"] = reads(
model["content"], fmt=fmt, config=config
)
# mark all code cells from text notebooks as 'trusted'
# as they don't have any outputs, cf. #941
for cell in model["content"].cells:
if cell.cell_type == "code":
cell["metadata"]["trusted"] = True
except Exception as err:
self.log.error(
"Error while reading file: %s %s", path, err, exc_info=True
)
raise HTTPError(500, str(err))
if not load_alternative_format:
return model
# We will now read a second file if this is a paired notebook.
if content:
nbk = model["content"]
formats = nbk.metadata.get("jupytext", {}).get(
"formats"
) or config.default_formats(path)
formats = long_form_multiple_formats(
formats, nbk.metadata, auto_ext_requires_language_info=False
)
else:
if path not in self.paired_notebooks:
return model
_, formats = self.paired_notebooks.get(path)
formats = long_form_multiple_formats(formats)
# Compute paired notebooks from formats
alt_paths = [(path, fmt)]
if formats:
try:
_, fmt = find_base_path_and_format(path, formats)
alt_paths = paired_paths(path, fmt, formats)
self.update_paired_notebooks(path, formats)
except InconsistentPath as err:
self.log.error(
"Unable to read paired notebook: %s %s",
path,
err,
exc_info=True,
)
else:
if path in self.paired_notebooks:
fmt, formats = self.paired_notebooks.get(path)
alt_paths = paired_paths(path, fmt, formats)
formats = long_form_multiple_formats(formats)
if content and len(alt_paths) > 1 and ext == ".ipynb":
# Apply default options (like saving and reloading would do)
jupytext_metadata = model["content"]["metadata"].get("jupytext", {})
config.set_default_format_options(jupytext_metadata, read=True)
if jupytext_metadata:
model["content"]["metadata"]["jupytext"] = jupytext_metadata
def get_timestamp(alt_path):
if not self.exists(alt_path):
return None
if alt_path == path:
return model["last_modified"]
return self.super.get(alt_path, content=False)["last_modified"]
def read_one_file(alt_path, alt_fmt):
if alt_path == path:
return model["content"]
if alt_path.endswith(".ipynb"):
self.log.info(f"Reading OUTPUTS from {alt_path}")
return self.super.get(
alt_path, content=True, type="notebook", format=format
)["content"]
self.log.info(f"Reading SOURCE from {alt_path}")
text = self.super.get(
alt_path, content=True, type="file", format=format
)["content"]
return reads(text, fmt=alt_fmt, config=config)
inputs, outputs = latest_inputs_and_outputs(
path, fmt, formats, get_timestamp, contents_manager_mode=True
)
# Modification time of a paired notebook is the timestamp of inputs #118 #978
model["last_modified"] = inputs.timestamp
if not content:
return model
# Before we combine the two files, we make sure we're not overwriting ipynb cells
# with an outdated text file
content = None
try:
if (
outputs.timestamp
and outputs.timestamp
> inputs.timestamp
+ timedelta(seconds=config.outdated_text_notebook_margin)
):
ts_mismatch = (
"{out} (last modified {out_last}) is more recent than "
"{src} (last modified {src_last})".format(
src=inputs.path,
src_last=inputs.timestamp,
out=outputs.path,
out_last=outputs.timestamp,
)
)
self.log.warning(ts_mismatch)
try:
content = read_pair(
inputs, outputs, read_one_file, must_match=True
)
self.log.warning(
"The inputs in {src} and {out} are identical, "
"so the mismatch in timestamps was ignored".format(
src=inputs.path, out=outputs.path
)
)
except HTTPError:
raise
except PairedFilesDiffer as diff:
raise HTTPError(
400,
"""{ts_mismatch}
Differences (jupytext --diff {src} {out}) are:
{diff}
Please either:
- open {src} in a text editor, make sure it is up to date, and save it,
- or delete {src} if not up to date,
- or increase check margin by adding, say,
outdated_text_notebook_margin = 5 # default is 1 (second)
to your jupytext.toml file
""".format(
ts_mismatch=ts_mismatch,
src=inputs.path,
out=outputs.path,
diff=diff,
),
)
except OverflowError:
pass
if content is not None:
model["content"] = content
else:
try:
model["content"] = read_pair(inputs, outputs, read_one_file)
except HTTPError:
raise
except Exception as err:
self.log.error(
"Error while reading file: %s %s", path, err, exc_info=True
)
raise HTTPError(500, str(err))
if not outputs.timestamp:
set_kernelspec_from_language(model["content"])
return model
def new_untitled(self, path="", type="", ext=""):
"""Create a new untitled file or directory in path
We override the base function because that one does not take the 'ext' argument
into account when type=="notebook". See https://github.com/mwouts/jupytext/issues/443
"""
if type != "notebook" and ext != ".ipynb":
return self.super.new_untitled(path, type, ext)
ext = ext or ".ipynb"
if ":" in ext:
ext, format_name = ext.split(":", 1)
else:
format_name = ""
path = path.strip("/")
if not self.dir_exists(path):
raise HTTPError(404, "No such directory: %s" % path)
untitled = self.untitled_notebook
config = self.get_config(path)
name = self.increment_notebook_filename(config, untitled + ext, path)
path = f"{path}/{name}"
model = {"type": "notebook"}
if format_name:
model["format"] = "json"
model["content"] = nbformat.v4.nbbase.new_notebook(
metadata={"jupytext": {"formats": ext + ":" + format_name}}
)
return self.new(model, path)
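# Study note (not part of jupytext): the 'ext' argument may carry a format
# name, e.g. ext=".py:percent" creates an untitled percent-format notebook
# whose metadata records {"jupytext": {"formats": ".py:percent"}}.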
@language elisp
; Maybe...
error princ
; More typical of other lisps...
and apply
car cdr cons cond
defconst defun defvar
eq equal eval
gt ge
if
let le lt
mapcar
ne nil
or not
prog progn
set setq
t type-of
unless
when while
def increment_notebook_filename(self, config, filename, path=""):
"""Increment a notebook filename until it is unique, regardless of extension"""
# Extract the full suffix from the filename (e.g. .tar.gz)
path = path.strip("/")
basename, dot, ext = filename.partition(".")
ext = dot + ext
for i in itertools.count():
if i:
insert_i = f"{i}"
else:
insert_i = ""
basename_i = basename + insert_i
name = basename_i + ext
if not any(
self.exists(f"{path}/{basename_i}{nb_ext}")
for nb_ext in config.notebook_extensions
):
break
return name
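# Study note (not part of jupytext): because every notebook extension is
# checked, an existing Untitled.py also blocks "Untitled.ipynb" when
# notebook_extensions is [".ipynb", ".py"], so the first free name is
# Untitled1.ipynb - a new untitled notebook never collides with half a pair.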
def trust_notebook(self, path):
"""Trust the current notebook"""
if path.endswith(".ipynb") or path not in self.paired_notebooks:
self.super.trust_notebook(path)
return
fmt, formats = self.paired_notebooks[path]
for alt_path, alt_fmt in paired_paths(path, fmt, formats):
if alt_fmt["extension"] == ".ipynb":
self.super.trust_notebook(alt_path)
def rename_file(self, old_path, new_path):
"""Rename the current notebook, as well as its alternative representations"""
if old_path not in self.paired_notebooks:
try:
# we do not know yet if this is a paired notebook (#190)
# -> to get this information we open the notebook
self.get(old_path, content=True)
except Exception:
pass
if old_path not in self.paired_notebooks:
self.super.rename_file(old_path, new_path)
return
fmt, formats = self.paired_notebooks.get(old_path)
old_alt_paths = paired_paths(old_path, fmt, formats)
# Is the new file name consistent with the format's prefix and suffix?
try:
new_base = base_path(new_path, fmt)
except HTTPError:
raise
except Exception as err:
self.log.error(
"Error while renaming file from %s to %s: %s",
old_path,
new_path,
err,
exc_info=True,
)
raise HTTPError(500, str(err))
for old_alt_path, alt_fmt in old_alt_paths:
new_alt_path = full_path(new_base, alt_fmt)
if self.exists(old_alt_path):
self.create_prefix_dir(new_alt_path, alt_fmt)
self.super.rename_file(old_alt_path, new_alt_path)
self.drop_paired_notebook(old_path)
self.update_paired_notebooks(new_path, formats)
def get_parent_dir(self, path):
"""The parent directory"""
if "/" in path:
return path.rsplit("/", 1)[0]
# jupyter-fs
if ":" in path and hasattr(self, "_managers"):
if path.endswith(":"):
return ""
return path.rsplit(":", 1)[0] + ":"
return ""
def get_config_file(self, directory):
"""Return the jupytext configuration file, if any"""
for jupytext_config_file in JUPYTEXT_CONFIG_FILES:
path = directory + "/" + jupytext_config_file
if self.file_exists(path):
if not self.allow_hidden and jupytext_config_file.startswith("."):
self.log.warning(
f"Ignoring config file {path} (see Jupytext issue #964)"
)
continue
return path
pyproject_path = directory + "/" + PYPROJECT_FILE
if self.file_exists(pyproject_path):
model = self.get(pyproject_path, type="file")
try:
doc = tomllib.loads(model["content"])
except tomllib.TOMLDecodeError as e:
self.log.warning(f"Cannot load {pyproject_path}: {e}")
else:
if doc.get("tool", {}).get("jupytext") is not None:
return pyproject_path
if not directory:
return None
parent_dir = self.get_parent_dir(directory)
return self.get_config_file(parent_dir)
def load_config_file(
self, config_file, *, prev_config_file, prev_config, is_os_path=False
):
"""Load the configuration file"""
if config_file is None:
return None
if config_file.endswith(".py") and not is_os_path:
config_file = self._get_os_path(config_file)
is_os_path = True
config_content = None
if not is_os_path:
try:
model = self.super.get(config_file, content=True, type="file")
config_content = model["content"]
except HTTPError:
pass
config = load_jupytext_configuration_file(config_file, config_content)
if config is None:
return config
log_level = config.cm_config_log_level
if log_level == "info_if_changed":
if config_file != prev_config_file or config != prev_config:
log_level = "info"
else:
log_level = "none"
if log_level != "none":
getattr(self.log, log_level)(
"Loaded Jupytext configuration file at %s", config_file
)
return config
def get_config(self, path, use_cache=False):
"""Return the Jupytext configuration for the given path"""
parent_dir = self.get_parent_dir(path)
# When listing the notebooks for the tree view, we use a cache for the configuration file
# The cache will be refreshed when a notebook is opened or saved, or when we go
# to a different directory.
if not use_cache or parent_dir != self.cached_config.path:
try:
config_file = self.get_config_file(parent_dir)
if config_file:
self.cached_config.config = self.load_config_file(
config_file,
prev_config_file=self.cached_config.config_file,
prev_config=self.cached_config.config,
)
else:
config_file = find_global_jupytext_configuration_file()
self.cached_config.config = self.load_config_file(
config_file,
prev_config_file=self.cached_config.config_file,
prev_config=self.cached_config.config,
is_os_path=True,
)
self.cached_config.config_file = config_file
self.cached_config.path = parent_dir
except JupytextConfigurationError as err:
self.log.error(
"Error while reading config file: %s %s",
config_file,
err,
exc_info=True,
)
raise HTTPError(500, f"{err}")
if self.cached_config.config is not None:
return self.cached_config.config
if isinstance(self.notebook_extensions, str):
self.notebook_extensions = self.notebook_extensions.split(",")
return self
"""Convert Markdown equations to doxygen equations and back
See https://github.com/mwouts/jupytext/issues/517"""
import re
@others
@language python
@tabwidth -4
def markdown_to_doxygen(string):
"""Markdown to Doxygen equations"""
long_equations = re.sub(
r"(?<!\\)\$\$(.*?)(?<!\\)\$\$", r"\\f[\g<1>\\f]", string, flags=re.DOTALL
)
inline_equations = re.sub(r"(?<!(\\|\$))\$(?!\$)", r"\\f$", long_equations)
return inline_equations
def doxygen_to_markdown(string):
"""Doxygen to Markdown equations"""
long_equations = re.sub(r"\\f\[(.*?)\\f\]", r"$$\g<1>$$", string, flags=re.DOTALL)
inline_equations = re.sub(r"\\f\$", "$", long_equations)
return inline_equations
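# Study note (not part of jupytext): round-trip sketch of the two helpers above.
#   >>> markdown_to_doxygen("inline $x$ and display $$x^2$$")
#   'inline \\f$x\\f$ and display \\f[x^2\\f]'
#   >>> doxygen_to_markdown('inline \\f$x\\f$ and display \\f[x^2\\f]')
#   'inline $x$ and display $$x^2$$'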
"""
In this file the various text notebook formats are defined. Please contribute
new formats here!
"""
import os
import re
import warnings
import nbformat
import yaml
from .cell_reader import (
DoublePercentScriptCellReader,
HydrogenCellReader,
LightScriptCellReader,
MarkdownCellReader,
RMarkdownCellReader,
RScriptCellReader,
SphinxGalleryScriptCellReader,
)
from .cell_to_text import (
BareScriptCellExporter,
DoublePercentCellExporter,
HydrogenCellExporter,
LightScriptCellExporter,
MarkdownCellExporter,
RMarkdownCellExporter,
RScriptCellExporter,
SphinxGalleryCellExporter,
)
from .header import header_to_metadata_and_cell, insert_or_test_version_number
from .languages import _COMMENT_CHARS, _SCRIPT_EXTENSIONS, same_language
from .magics import is_magic
from .metadata_filter import metadata_filter_as_string
from .myst import (
MYST_FORMAT_NAME,
is_myst_available,
matches_mystnb,
myst_extensions,
myst_version,
)
from .pandoc import is_pandoc_available, pandoc_version
from .stringparser import StringParser
from .version import __version__
@others
@language python
@tabwidth -4
class JupytextFormatError(ValueError):
"""Error in the specification of the format for the text notebook"""
class NotebookFormatDescription:
"""Description of a notebook format"""
@others
def __init__(
self,
format_name,
extension,
header_prefix,
cell_reader_class,
cell_exporter_class,
current_version_number,
header_suffix="",
min_readable_version_number=None,
):
self.format_name = format_name
self.extension = extension
self.header_prefix = header_prefix
self.header_suffix = header_suffix
self.cell_reader_class = cell_reader_class
self.cell_exporter_class = cell_exporter_class
self.current_version_number = current_version_number
self.min_readable_version_number = min_readable_version_number
JUPYTEXT_FORMATS = (
[
NotebookFormatDescription(
format_name="markdown",
extension=".md",
header_prefix="",
cell_reader_class=MarkdownCellReader,
cell_exporter_class=MarkdownCellExporter,
# Version 1.0 on 2018-08-31 - jupytext v0.6.0 : Initial version
# Version 1.1 on 2019-03-24 - jupytext v1.1.0 : Markdown regions and cell metadata
# Version 1.2 on 2019-09-21 - jupytext v1.3.0 : Raw regions are now encoded with HTML comments (#321)
# and by default, cell metadata use the key=value representation (#347)
# Version 1.3 on 2021-01-24 - jupytext v1.10.0 : Code cells may start with more than three backticks (#712)
current_version_number="1.3",
min_readable_version_number="1.0",
),
NotebookFormatDescription(
format_name="markdown",
extension=".markdown",
header_prefix="",
cell_reader_class=MarkdownCellReader,
cell_exporter_class=MarkdownCellExporter,
current_version_number="1.2",
min_readable_version_number="1.0",
),
NotebookFormatDescription(
format_name="rmarkdown",
extension=".Rmd",
header_prefix="",
cell_reader_class=RMarkdownCellReader,
cell_exporter_class=RMarkdownCellExporter,
# Version 1.0 on 2018-08-22 - jupytext v0.5.2 : Initial version
# Version 1.1 on 2019-03-24 - jupytext v1.1.0 : Markdown regions and cell metadata
# Version 1.2 on 2019-09-21 - jupytext v1.3.0 : Raw regions are now encoded with HTML comments (#321)
# and by default, cell metadata use the key=value representation in raw and markdown cells (#347)
current_version_number="1.2",
min_readable_version_number="1.0",
),
]
+ [
NotebookFormatDescription(
format_name="light",
extension=ext,
header_prefix=_SCRIPT_EXTENSIONS[ext]["comment"],
header_suffix=_SCRIPT_EXTENSIONS[ext].get("comment_suffix", ""),
cell_reader_class=LightScriptCellReader,
cell_exporter_class=LightScriptCellExporter,
# Version 1.5 on 2019-10-19 - jupytext v1.3.0 - Cell metadata represented as key=value by default
# Version 1.4 on 2019-03-30 - jupytext v1.1.0 - custom cell markers allowed
# Version 1.3 on 2018-09-22 - jupytext v0.7.0rc0 : Metadata are
# allowed for all cell types (and then include 'cell_type')
# Version 1.2 on 2018-09-05 - jupytext v0.6.3 : Metadata bracket
# can be omitted when empty, if previous line is empty #57
# Version 1.1 on 2018-08-25 - jupytext v0.6.0 : Cells separated
# with one blank line #38
# Version 1.0 on 2018-08-22 - jupytext v0.5.2 : Initial version
current_version_number="1.5",
min_readable_version_number="1.1",
)
for ext in _SCRIPT_EXTENSIONS
]
+ [
NotebookFormatDescription(
format_name="nomarker",
extension=ext,
header_prefix=_SCRIPT_EXTENSIONS[ext]["comment"],
header_suffix=_SCRIPT_EXTENSIONS[ext].get("comment_suffix", ""),
cell_reader_class=LightScriptCellReader,
cell_exporter_class=BareScriptCellExporter,
current_version_number="1.0",
min_readable_version_number="1.0",
)
for ext in _SCRIPT_EXTENSIONS
]
+ [
NotebookFormatDescription(
format_name="percent",
extension=ext,
header_prefix=_SCRIPT_EXTENSIONS[ext]["comment"],
header_suffix=_SCRIPT_EXTENSIONS[ext].get("comment_suffix", ""),
cell_reader_class=DoublePercentScriptCellReader,
cell_exporter_class=DoublePercentCellExporter,
# Version 1.3 on 2019-09-21 - jupytext v1.3.0: Markdown cells can be quoted using triple quotes #305
# and cell metadata are represented as key=value by default
# Version 1.2 on 2018-11-18 - jupytext v0.8.6: Jupyter magics are commented by default #126, #132
# Version 1.1 on 2018-09-23 - jupytext v0.7.0rc1 : [markdown] and
# [raw] for markdown and raw cells.
# Version 1.0 on 2018-09-22 - jupytext v0.7.0rc0 : Initial version
current_version_number="1.3",
min_readable_version_number="1.1",
)
for ext in _SCRIPT_EXTENSIONS
]
+ [
NotebookFormatDescription(
format_name="hydrogen",
extension=ext,
header_prefix=_SCRIPT_EXTENSIONS[ext]["comment"],
header_suffix=_SCRIPT_EXTENSIONS[ext].get("comment_suffix", ""),
cell_reader_class=HydrogenCellReader,
cell_exporter_class=HydrogenCellExporter,
# Version 1.2 on 2018-12-14 - jupytext v0.9.0: same as percent - only magics are not commented by default
current_version_number="1.3",
min_readable_version_number="1.1",
)
for ext in _SCRIPT_EXTENSIONS
]
+ [
NotebookFormatDescription(
format_name="spin",
extension=ext,
header_prefix="#'",
cell_reader_class=RScriptCellReader,
cell_exporter_class=RScriptCellExporter,
# Version 1.0 on 2018-08-22 - jupytext v0.5.2 : Initial version
current_version_number="1.0",
)
for ext in [".r", ".R"]
]
+ [
NotebookFormatDescription(
format_name="sphinx",
extension=".py",
header_prefix="#",
cell_reader_class=SphinxGalleryScriptCellReader,
cell_exporter_class=SphinxGalleryCellExporter,
# Version 1.0 on 2018-09-22 - jupytext v0.7.0rc0 : Initial version
current_version_number="1.1",
),
NotebookFormatDescription(
format_name="pandoc",
extension=".md",
header_prefix="",
cell_reader_class=None,
cell_exporter_class=None,
current_version_number=pandoc_version(),
),
NotebookFormatDescription(
format_name="quarto",
extension=".qmd",
header_prefix="",
cell_reader_class=None,
cell_exporter_class=None,
# Version 1.0 on 2021-09-07 = quarto --version >= 0.2.134,
# cf. https://github.com/mwouts/jupytext/issues/837
current_version_number="1.0",
),
]
+ [
NotebookFormatDescription(
format_name=MYST_FORMAT_NAME,
extension=ext,
header_prefix="",
cell_reader_class=None,
cell_exporter_class=None,
current_version_number=myst_version(),
)
for ext in myst_extensions()
]
)
NOTEBOOK_EXTENSIONS = list(
dict.fromkeys([".ipynb"] + [fmt.extension for fmt in JUPYTEXT_FORMATS])
)
EXTENSION_PREFIXES = [".lgt", ".spx", ".pct", ".hyd", ".nb"]
def get_format_implementation(ext, format_name=None):
"""Return the implementation for the desired format"""
# remove pre-extension if any
ext = "." + ext.split(".")[-1]
formats_for_extension = []
for fmt in JUPYTEXT_FORMATS:
if fmt.extension == ext:
if fmt.format_name == format_name or not format_name:
return fmt
formats_for_extension.append(fmt.format_name)
if formats_for_extension:
raise JupytextFormatError(
"Format '{}' is not associated to extension '{}'. "
"Please choose one of: {}.".format(
format_name, ext, ", ".join(formats_for_extension)
)
)
raise JupytextFormatError(f"No format associated to extension '{ext}'")
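# Study note (not part of jupytext): lookup sketch. Since JUPYTEXT_FORMATS
# lists "light" before "nomarker", "percent" and "hydrogen" for script
# extensions,
#   get_format_implementation(".py")            -> the "light" description
#   get_format_implementation(".py", "percent") -> the "percent" description
# and an unknown name such as "fancy" raises JupytextFormatError.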
def read_metadata(text, ext):
"""Return the header metadata"""
ext = "." + ext.split(".")[-1]
lines = text.splitlines()
if ext in [".md", ".markdown", ".Rmd"]:
comment = comment_suffix = ""
else:
comment = _SCRIPT_EXTENSIONS.get(ext, {}).get("comment", "#")
comment_suffix = _SCRIPT_EXTENSIONS.get(ext, {}).get("comment_suffix", "")
metadata, _, _, _ = header_to_metadata_and_cell(lines, comment, comment_suffix, ext)
if ext in [".r", ".R"] and not metadata:
metadata, _, _, _ = header_to_metadata_and_cell(lines, "#'", "", ext)
# MyST has the metadata at the root level
if not metadata and ext in myst_extensions() and text.startswith("---"):
for header in yaml.safe_load_all(text):
if not isinstance(header, dict):
continue
if (
header.get("jupytext", {})
.get("text_representation", {})
.get("format_name")
== "myst"
):
return header
return metadata
return metadata
def read_format_from_metadata(text, ext):
"""Return the format of the file, when that information is available from the metadata"""
metadata = read_metadata(text, ext)
rearrange_jupytext_metadata(metadata)
return format_name_for_ext(metadata, ext, explicit_default=False)
def guess_format(text, ext):
"""Guess the format and format options of the file, given its extension and content"""
metadata = read_metadata(text, ext)
if "text_representation" in metadata.get("jupytext", {}):
return format_name_for_ext(metadata, ext), {}
if is_myst_available() and ext in myst_extensions() and matches_mystnb(text, ext):
return MYST_FORMAT_NAME, {}
lines = text.splitlines()
# Is this a Hydrogen-like script?
# Or a Sphinx-gallery script?
if ext in _SCRIPT_EXTENSIONS:
unescaped_comment = _SCRIPT_EXTENSIONS[ext]["comment"]
comment = re.escape(unescaped_comment)
language = _SCRIPT_EXTENSIONS[ext]["language"]
twenty_hash_re = re.compile(r"^#( |)#{19,}\s*$")
double_percent_re = re.compile(rf"^{comment}( %%|%%)$")
double_percent_and_space_re = re.compile(rf"^{comment}( %%|%%)\s")
nbconvert_script_re = re.compile(rf"^{comment}( <codecell>| In\[[0-9 ]*\]:?)")
vim_folding_markers_re = re.compile(rf"^{comment}\s*" + "{{{")
vscode_folding_markers_re = re.compile(rf"^{comment}\s*region")
twenty_hash_count = 0
double_percent_count = 0
magic_command_count = 0
rspin_comment_count = 0
vim_folding_markers_count = 0
vscode_folding_markers_count = 0
parser = StringParser(language="R" if ext in [".r", ".R"] else "python")
for line in lines:
parser.read_line(line)
if parser.is_quoted():
continue
# Don't count escaped Jupyter magics (no space between %% and command) as cells
if (
double_percent_re.match(line)
or double_percent_and_space_re.match(line)
or nbconvert_script_re.match(line)
):
double_percent_count += 1
if not line.startswith(unescaped_comment) and is_magic(line, language):
magic_command_count += 1
if twenty_hash_re.match(line) and ext == ".py":
twenty_hash_count += 1
if line.startswith("#'") and ext in [".R", ".r"]:
rspin_comment_count += 1
if vim_folding_markers_re.match(line):
vim_folding_markers_count += 1
if vscode_folding_markers_re.match(line):
vscode_folding_markers_count += 1
if double_percent_count >= 1:
if magic_command_count:
return "hydrogen", {}
return "percent", {}
if vim_folding_markers_count:
return "light", {"cell_markers": "{{{,}}}"}
if vscode_folding_markers_count:
return "light", {"cell_markers": "region,endregion"}
if twenty_hash_count >= 2:
return "sphinx", {}
if rspin_comment_count >= 1:
return "spin", {}
if ext in [".md", ".markdown"]:
for line in lines:
if line.startswith(":::"): # Pandoc div
return "pandoc", {}
# Default format
return get_format_implementation(ext).format_name, {}
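# Study note (not part of jupytext): guessing sketch on tiny inputs.
#   >>> guess_format("# %%\nprint(1)\n", ".py")[0]
#   'percent'
#   >>> guess_format("# {{{\nprint(1)\n# }}}\n", ".py")
#   ('light', {'cell_markers': '{{{,}}}'})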
def divine_format(text):
"""Guess the format of the notebook, based on its content #148"""
try:
nbformat.reads(text, as_version=4)
return "ipynb"
except nbformat.reader.NotJSONError:
pass
lines = text.splitlines()
for comment in ["", "#"] + _COMMENT_CHARS:
metadata, _, _, _ = header_to_metadata_and_cell(lines, comment, "")
ext = (
metadata.get("jupytext", {}).get("text_representation", {}).get("extension")
)
if ext:
return ext[1:] + ":" + guess_format(text, ext)[0]
# No metadata, but ``` on at least one line => markdown
for line in lines:
if line == "```":
return "md"
return "py:" + guess_format(text, ".py")[0]
def check_file_version(notebook, source_path, outputs_path):
"""Raise if file version in source file would override outputs"""
if not insert_or_test_version_number():
return
_, ext = os.path.splitext(source_path)
assert not ext.endswith(".ipynb"), "source_path={} should be a text file".format(
source_path
)
version = (
notebook.metadata.get("jupytext", {})
.get("text_representation", {})
.get("format_version")
)
format_name = format_name_for_ext(notebook.metadata, ext)
fmt = get_format_implementation(ext, format_name)
current = fmt.current_version_number
# Missing version, still generated by jupytext?
if notebook.metadata and not version:
version = current
# Same version? OK
if version == fmt.current_version_number:
return
# Version larger than minimum readable version
if (fmt.min_readable_version_number or current) <= version <= current:
return
jupytext_version_in_file = (
notebook.metadata.get("jupytext", {})
.get("text_representation", {})
.get("jupytext_version", "N/A")
)
raise JupytextFormatError(
"The file {source_path} was generated with jupytext version {jupytext_version_in_file} "
"but you have {jupytext_version} installed. Please upgrade jupytext to version "
"{jupytext_version_in_file}, or remove either {source_path} or {output_path}. "
"This error occurs because {source_path} is in the {format_name} format in version {format_version}, "
"while jupytext version {jupytext_version} installed at {jupytext_path} can only read the "
"{format_name} format in versions {min_format_version} to {current_format_version}.".format(
source_path=os.path.basename(source_path),
output_path=os.path.basename(outputs_path),
format_name=format_name,
format_version=version,
jupytext_version_in_file=jupytext_version_in_file,
jupytext_version=__version__,
jupytext_path=os.path.dirname(os.path.dirname(__file__)),
min_format_version=fmt.min_readable_version_number or current,
current_format_version=current,
)
)
def format_name_for_ext(metadata, ext, cm_default_formats=None, explicit_default=True):
"""Return the format name for that extension"""
# Is the format information available in the text representation?
text_repr = metadata.get("jupytext", {}).get("text_representation", {})
if text_repr.get("extension", "").endswith(ext) and text_repr.get("format_name"):
return text_repr.get("format_name")
# Format from jupytext.formats
formats = metadata.get("jupytext", {}).get("formats", "") or cm_default_formats
formats = long_form_multiple_formats(formats)
for fmt in formats:
if fmt["extension"] == ext:
if (not explicit_default) or fmt.get("format_name"):
return fmt.get("format_name")
if (not explicit_default) or ext in [".md", ".markdown", ".Rmd"]:
return None
return get_format_implementation(ext).format_name
def identical_format_path(fmt1, fmt2):
"""Do the two (long representation) of formats target the same file?"""
for key in ["extension", "prefix", "suffix"]:
if fmt1.get(key) != fmt2.get(key):
return False
return True
def update_jupytext_formats_metadata(metadata, new_format):
"""Update the jupytext_format metadata in the Jupyter notebook"""
new_format = long_form_one_format(new_format)
formats = long_form_multiple_formats(
metadata.get("jupytext", {}).get("formats", "")
)
if not formats:
return
for fmt in formats:
if identical_format_path(fmt, new_format):
fmt["format_name"] = new_format.get("format_name")
break
metadata.setdefault("jupytext", {})["formats"] = short_form_multiple_formats(
formats
)
def rearrange_jupytext_metadata(metadata):
"""Convert the jupytext_formats metadata entry to jupytext/formats, etc. See #91"""
# Backward compatibility with nbrmd
for key in ["nbrmd_formats", "nbrmd_format_version"]:
if key in metadata:
metadata[key.replace("nbrmd", "jupytext")] = metadata.pop(key)
jupytext_metadata = metadata.pop("jupytext", {})
if "jupytext_formats" in metadata:
jupytext_metadata["formats"] = metadata.pop("jupytext_formats")
if "jupytext_format_version" in metadata:
jupytext_metadata["text_representation"] = {
"format_version": metadata.pop("jupytext_format_version")
}
if "main_language" in metadata:
jupytext_metadata["main_language"] = metadata.pop("main_language")
for entry in ["encoding", "executable"]:
if entry in metadata:
jupytext_metadata[entry] = metadata.pop(entry)
filters = jupytext_metadata.pop("metadata_filter", {})
if "notebook" in filters:
jupytext_metadata["notebook_metadata_filter"] = filters["notebook"]
if "cells" in filters:
jupytext_metadata["cell_metadata_filter"] = filters["cells"]
for filter_level in ["notebook_metadata_filter", "cell_metadata_filter"]:
if filter_level in jupytext_metadata:
jupytext_metadata[filter_level] = metadata_filter_as_string(
jupytext_metadata[filter_level]
)
if (
jupytext_metadata.get("text_representation", {})
.get("jupytext_version", "")
.startswith("0.")
):
formats = jupytext_metadata.get("formats")
if formats:
jupytext_metadata["formats"] = ",".join(
["." + fmt if fmt.rfind(".") > 0 else fmt for fmt in formats.split(",")]
)
# auto to actual extension
formats = jupytext_metadata.get("formats")
if formats:
jupytext_metadata["formats"] = short_form_multiple_formats(
long_form_multiple_formats(formats, metadata)
)
if jupytext_metadata:
metadata["jupytext"] = jupytext_metadata
def long_form_one_format(
jupytext_format, metadata=None, update=None, auto_ext_requires_language_info=True
):
"""Parse 'sfx.py:percent' into {'suffix':'sfx', 'extension':'py', 'format_name':'percent'}"""
if isinstance(jupytext_format, dict):
if update:
jupytext_format.update(update)
return validate_one_format(jupytext_format)
if not jupytext_format:
return {}
common_name_to_ext = {
"notebook": "ipynb",
"rmarkdown": "Rmd",
"quarto": "qmd",
"markdown": "md",
"script": "auto",
"c++": "cpp",
"myst": "md:myst",
"pandoc": "md:pandoc",
}
if jupytext_format.lower() in common_name_to_ext:
jupytext_format = common_name_to_ext[jupytext_format.lower()]
fmt = {}
if jupytext_format.rfind("/") > 0:
fmt["prefix"], jupytext_format = jupytext_format.rsplit("/", 1)
if jupytext_format.rfind(":") >= 0:
ext, fmt["format_name"] = jupytext_format.rsplit(":", 1)
if fmt["format_name"] == "bare":
warnings.warn(
"The `bare` format has been renamed to `nomarker` - (see https://github.com/mwouts/jupytext/issues/397)",
DeprecationWarning,
)
fmt["format_name"] = "nomarker"
elif (
not jupytext_format
or "." in jupytext_format
or ("." + jupytext_format) in NOTEBOOK_EXTENSIONS + [".auto"]
):
ext = jupytext_format
elif jupytext_format in _VALID_FORMAT_NAMES:
fmt["format_name"] = jupytext_format
ext = ""
else:
raise JupytextFormatError(
"'{}' is not a notebook extension (one of {}), nor a notebook format (one of {})".format(
jupytext_format,
", ".join(NOTEBOOK_EXTENSIONS),
", ".join(_VALID_FORMAT_NAMES),
)
)
if ext.rfind(".") > 0:
fmt["suffix"], ext = os.path.splitext(ext)
if not ext.startswith("."):
ext = "." + ext
if ext == ".auto":
ext = auto_ext_from_metadata(metadata) if metadata is not None else ".auto"
if not ext:
if auto_ext_requires_language_info:
raise JupytextFormatError(
"No language information in this notebook. Please replace 'auto' with "
"an actual script extension."
)
ext = ".auto"
fmt["extension"] = ext
if update:
fmt.update(update)
return validate_one_format(fmt)
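# Study note (not part of jupytext): parsing sketch.
#   >>> long_form_one_format("md:myst")
#   {'format_name': 'myst', 'extension': '.md'}
#   >>> long_form_one_format("docs/sfx.py:percent")
#   {'prefix': 'docs', 'format_name': 'percent', 'suffix': 'sfx', 'extension': '.py'}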
def long_form_multiple_formats(
jupytext_formats, metadata=None, auto_ext_requires_language_info=True
):
"""Convert a concise encoding of jupytext.formats to a list of formats, encoded as dictionaries"""
if not jupytext_formats:
return []
if not isinstance(jupytext_formats, list):
jupytext_formats = [fmt for fmt in jupytext_formats.split(",") if fmt]
jupytext_formats = [
long_form_one_format(
fmt,
metadata,
auto_ext_requires_language_info=auto_ext_requires_language_info,
)
for fmt in jupytext_formats
]
if not auto_ext_requires_language_info:
jupytext_formats = [
fmt for fmt in jupytext_formats if fmt["extension"] != ".auto"
]
return jupytext_formats
def short_form_one_format(jupytext_format):
"""Represent one jupytext format as a string"""
if not isinstance(jupytext_format, dict):
return jupytext_format
fmt = jupytext_format["extension"]
if "suffix" in jupytext_format:
fmt = jupytext_format["suffix"] + fmt
elif fmt.startswith("."):
fmt = fmt[1:]
if "prefix" in jupytext_format:
fmt = jupytext_format["prefix"] + "/" + fmt
if jupytext_format.get("format_name"):
if jupytext_format["extension"] not in [
".md",
".markdown",
".Rmd",
] or jupytext_format["format_name"] in ["pandoc", MYST_FORMAT_NAME]:
fmt = fmt + ":" + jupytext_format["format_name"]
return fmt
def short_form_multiple_formats(jupytext_formats):
"""Convert jupytext formats, represented as a list of dictionaries, to a comma separated list"""
if not isinstance(jupytext_formats, list):
return jupytext_formats
jupytext_formats = [short_form_one_format(fmt) for fmt in jupytext_formats]
return ",".join(jupytext_formats)
_VALID_FORMAT_INFO = ["extension", "format_name", "suffix", "prefix"]
_BINARY_FORMAT_OPTIONS = [
"comment_magics",
"hide_notebook_metadata",
"root_level_metadata_as_raw_cell",
"split_at_heading",
"rst2md",
"cell_metadata_json",
"use_runtools",
"doxygen_equation_markers",
]
_VALID_FORMAT_OPTIONS = _BINARY_FORMAT_OPTIONS + [
"notebook_metadata_filter",
"cell_metadata_filter",
"cell_markers",
"custom_cell_magics",
]
_VALID_FORMAT_NAMES = {fmt.format_name for fmt in JUPYTEXT_FORMATS}
def validate_one_format(jupytext_format):
"""Validate extension and options for the given format"""
if not isinstance(jupytext_format, dict):
raise JupytextFormatError("Jupytext format should be a dictionary")
if (
"format_name" in jupytext_format
and jupytext_format["format_name"] not in _VALID_FORMAT_NAMES
):
raise JupytextFormatError(
"{} is not a valid format name. Please choose one of {}".format(
jupytext_format.get("format_name"), ", ".join(_VALID_FORMAT_NAMES)
)
)
for key in jupytext_format:
if key not in _VALID_FORMAT_INFO + _VALID_FORMAT_OPTIONS:
raise JupytextFormatError(
"Unknown format option '{}' - should be one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)
)
)
value = jupytext_format[key]
if key in _BINARY_FORMAT_OPTIONS:
if not isinstance(value, bool):
raise JupytextFormatError(
"Format option '{}' should be a bool, not '{}'".format(
key, str(value)
)
)
if "extension" not in jupytext_format:
raise JupytextFormatError("Missing format extension")
ext = jupytext_format["extension"]
if ext not in NOTEBOOK_EXTENSIONS + [".auto"]:
raise JupytextFormatError(
"Extension '{}' is not a notebook extension. Please use one of '{}'.".format(
ext, "', '".join(NOTEBOOK_EXTENSIONS + [".auto"])
)
)
return jupytext_format
def auto_ext_from_metadata(metadata):
"""Script extension from notebook metadata"""
auto_ext = metadata.get("language_info", {}).get("file_extension")
# Sage notebooks have ".py" as the associated extension in "language_info",
# so we change it to ".sage" in that case, see #727
if auto_ext == ".py" and metadata.get("kernelspec", {}).get("language") == "sage":
auto_ext = ".sage"
if auto_ext is None:
language = metadata.get("kernelspec", {}).get("language") or metadata.get(
"jupytext", {}
).get("main_language")
if language:
for ext in _SCRIPT_EXTENSIONS:
if same_language(language, _SCRIPT_EXTENSIONS[ext]["language"]):
auto_ext = ext
break
if auto_ext == ".r":
return ".R"
if auto_ext == ".fs":
return ".fsx"
if auto_ext == ".resource":
return ".robot"
return auto_ext
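# Study note (not part of jupytext): with no language_info in the metadata,
#   auto_ext_from_metadata({"kernelspec": {"language": "R"}})
# falls back on _SCRIPT_EXTENSIONS and returns ".R"; the ".r" -> ".R" fix-up
# at the end of the function covers notebooks whose language_info reports the
# lowercase extension instead.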
def check_auto_ext(fmt, metadata, option):
"""Replace the auto extension with the actual file extension, and raise a ValueError if it cannot be determined"""
if fmt["extension"] != ".auto":
return fmt
auto_ext = auto_ext_from_metadata(metadata)
if auto_ext:
fmt = fmt.copy()
fmt["extension"] = auto_ext
return fmt
raise ValueError(
"The notebook does not have a 'language_info' metadata. "
"Please replace 'auto' with the actual language extension in the {} option (currently {}).".format(
option, short_form_one_format(fmt)
)
)
def formats_with_support_for_cell_metadata():
for fmt in JUPYTEXT_FORMATS:
if fmt.format_name == "myst" and not is_myst_available():
continue
if fmt.format_name == "pandoc" and not is_pandoc_available():
continue
if fmt.format_name not in ["sphinx", "nomarker", "spin", "quarto"]:
yield f"{fmt.extension[1:]}:{fmt.format_name}"
"""Parse header of text notebooks
"""
import re
import nbformat
import yaml
from nbformat.v4.nbbase import new_raw_cell
from yaml.representer import SafeRepresenter
from .languages import (
_SCRIPT_EXTENSIONS,
comment_lines,
default_language_from_metadata_and_ext,
)
from .metadata_filter import _DEFAULT_NOTEBOOK_METADATA, filter_metadata
from .pep8 import pep8_lines_between_cells
from .version import __version__
SafeRepresenter.add_representer(nbformat.NotebookNode, SafeRepresenter.represent_dict)
_HEADER_RE = re.compile(r"^---\s*$")
_BLANK_RE = re.compile(r"^\s*$")
_JUPYTER_RE = re.compile(r"^jupyter\s*:\s*$")
_LEFTSPACE_RE = re.compile(r"^\s")
_UTF8_HEADER = " -*- coding: utf-8 -*-"
# Change this to False in tests
INSERT_AND_CHECK_VERSION_NUMBER = True
@others
@language python
@tabwidth -4
def insert_or_test_version_number():
"""Should the format name and version number be inserted in text
representations (not in tests!)"""
return INSERT_AND_CHECK_VERSION_NUMBER
def uncomment_line(line, prefix, suffix=""):
"""Remove prefix (and space) from line"""
if prefix:
if line.startswith(prefix + " "):
line = line[len(prefix) + 1 :]
elif line.startswith(prefix):
line = line[len(prefix) :]
if suffix:
if line.endswith(suffix + " "):
line = line[: -(1 + len(suffix))]
elif line.endswith(suffix):
line = line[: -len(suffix)]
return line
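# Study note (not part of jupytext): one comment marker and at most one
# adjacent space are stripped on each side, e.g.
#   >>> uncomment_line("# title: Demo", "#")
#   'title: Demo'
#   >>> uncomment_line("(*title*)", "(*", "*)")
#   'title'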
def encoding_and_executable(notebook, metadata, ext):
"""Return encoding and executable lines for a notebook, if applicable"""
lines = []
comment = _SCRIPT_EXTENSIONS.get(ext, {}).get("comment")
jupytext_metadata = metadata.get("jupytext", {})
if comment is not None and "executable" in jupytext_metadata:
lines.append("#!" + jupytext_metadata.pop("executable"))
if comment is not None:
if "encoding" in jupytext_metadata:
lines.append(jupytext_metadata.pop("encoding"))
elif default_language_from_metadata_and_ext(metadata, ext) != "python":
for cell in notebook.cells:
try:
cell.source.encode("ascii")
except (UnicodeEncodeError, UnicodeDecodeError):
lines.append(comment + _UTF8_HEADER)
break
return lines
def insert_jupytext_info_and_filter_metadata(
metadata, fmt, text_format, unsupported_keys
):
"""Update the notebook metadata to include Jupytext information, and filter
the notebook metadata according to the default or user filter"""
if insert_or_test_version_number():
metadata.setdefault("jupytext", {})["text_representation"] = {
"extension": fmt["extension"],
"format_name": text_format.format_name,
"format_version": text_format.current_version_number,
"jupytext_version": __version__,
}
if "jupytext" in metadata and not metadata["jupytext"]:
del metadata["jupytext"]
notebook_metadata_filter = fmt.get("notebook_metadata_filter")
return filter_metadata(
metadata,
notebook_metadata_filter,
_DEFAULT_NOTEBOOK_METADATA,
unsupported_keys=unsupported_keys,
)
def metadata_and_cell_to_header(
notebook, metadata, text_format, fmt, unsupported_keys=None
):
"""
Return the text header corresponding to a notebook, and remove the
first cell of the notebook if it contained the header
"""
header = []
lines_to_next_cell = None
root_level_metadata = {}
root_level_metadata_as_raw_cell = fmt.get("root_level_metadata_as_raw_cell", True)
if not root_level_metadata_as_raw_cell:
root_level_metadata = metadata.get("jupytext", {}).pop(
"root_level_metadata", {}
)
elif notebook.cells:
cell = notebook.cells[0]
if cell.cell_type == "raw":
lines = cell.source.strip("\n\t ").splitlines()
if (
len(lines) >= 2
and _HEADER_RE.match(lines[0])
and _HEADER_RE.match(lines[-1])
):
header = lines[1:-1]
lines_to_next_cell = cell.metadata.get("lines_to_next_cell")
notebook.cells = notebook.cells[1:]
metadata = insert_jupytext_info_and_filter_metadata(
metadata, fmt, text_format, unsupported_keys
)
if metadata:
root_level_metadata["jupyter"] = metadata
if root_level_metadata:
header.extend(
yaml.safe_dump(root_level_metadata, default_flow_style=False).splitlines()
)
if header:
header = ["---"] + header + ["---"]
if (
fmt.get("hide_notebook_metadata", False)
and text_format.format_name == "markdown"
):
header = ["<!--", ""] + header + ["", "-->"]
return (
comment_lines(header, text_format.header_prefix, text_format.header_suffix),
lines_to_next_cell,
)
def recursive_update(target, update):
"""Update recursively a (nested) dictionary with the content of another.
Inspired by https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
"""
for key in update:
value = update[key]
if value is None:
del target[key]
elif isinstance(value, dict):
target[key] = recursive_update(target.get(key, {}), value)
else:
target[key] = value
return target
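# Study note (not part of jupytext): a None value deletes the matching key,
#   >>> recursive_update({"a": {"b": 1}, "d": 0}, {"a": {"c": 2}, "d": None})
#   {'a': {'b': 1, 'c': 2}}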
def header_to_metadata_and_cell(
lines, header_prefix, header_suffix, ext=None, root_level_metadata_as_raw_cell=True
):
"""
Return the metadata, a boolean to indicate whether a jupyter section was found,
the first cell of the notebook if some metadata is found outside
the jupyter section, and the next loc in the text
"""
header = []
jupyter = []
in_jupyter = False
in_html_div = False
start = 0
started = False
ended = False
metadata = {}
i = -1
comment = "#" if header_prefix == "#'" else header_prefix
encoding_re = re.compile(
rf"^[ \t\f]*{re.escape(comment)}.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)"
)
for i, line in enumerate(lines):
if i == 0 and line.startswith("#!"):
metadata.setdefault("jupytext", {})["executable"] = line[2:]
start = i + 1
continue
if i == 0 or (i == 1 and not encoding_re.match(lines[0])):
encoding = encoding_re.match(line)
if encoding:
if encoding.group(1) != "utf-8":
raise ValueError("Encodings other than utf-8 are not supported")
metadata.setdefault("jupytext", {})["encoding"] = line
start = i + 1
continue
if not line.startswith(header_prefix):
break
if not comment:
if line.strip().startswith("<!--"):
in_html_div = True
continue
if in_html_div:
if ended:
if "-->" in line:
break
if not started and not line.strip():
continue
line = uncomment_line(line, header_prefix, header_suffix)
if _HEADER_RE.match(line):
if not started:
started = True
continue
ended = True
if in_html_div:
continue
break
# Stop if there is something other than a YAML header
if not started and line.strip():
break
if _JUPYTER_RE.match(line):
in_jupyter = True
elif line and not _LEFTSPACE_RE.match(line):
in_jupyter = False
if in_jupyter:
jupyter.append(line)
else:
header.append(line)
if ended:
if jupyter:
extra_metadata = metadata
metadata = yaml.safe_load("\n".join(jupyter))["jupyter"]
recursive_update(metadata, extra_metadata)
lines_to_next_cell = 1
if len(lines) > i + 1:
line = uncomment_line(lines[i + 1], header_prefix)
if not _BLANK_RE.match(line):
lines_to_next_cell = 0
else:
i = i + 1
else:
lines_to_next_cell = 0
if header:
if root_level_metadata_as_raw_cell:
cell = new_raw_cell(
source="\n".join(["---"] + header + ["---"]),
metadata={}
if lines_to_next_cell
== pep8_lines_between_cells(["---"], lines[i + 1 :], ext)
else {"lines_to_next_cell": lines_to_next_cell},
)
else:
cell = None
root_level_metadata = yaml.safe_load("\n".join(header))
metadata.setdefault("jupytext", {})[
"root_level_metadata"
] = root_level_metadata
else:
cell = None
return metadata, jupyter, cell, i + 1
return metadata, False, None, start
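# Study note (not part of jupytext): parsing sketch with a hypothetical header.
#   lines = ["# ---", "# jupyter:", "#   kernelspec:", "#     name: python3",
#            "# ---", "", "1 + 1"]
#   header_to_metadata_and_cell(lines, "#", "")
# returns the metadata {'kernelspec': {'name': 'python3'}}, the raw jupyter
# lines, no leftover raw cell (None), and 6 as the next loc in the text.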
"""Read and write Jupyter notebooks as text files"""
from .formats import NOTEBOOK_EXTENSIONS, get_format_implementation, guess_format
from .jupytext import read, reads, write, writes
from .reraise import reraise
from .version import __version__
try:
from .contentsmanager import build_jupytext_contents_manager_class
except ImportError as err:
build_jupytext_contents_manager_class = reraise(err)
try:
from .contentsmanager import TextFileContentsManager
except ImportError as err:
TextFileContentsManager = reraise(err)
__all__ = [
"read",
"write",
"writes",
"reads",
"NOTEBOOK_EXTENSIONS",
"guess_format",
"get_format_implementation",
"TextFileContentsManager",
"build_jupytext_contents_manager_class",
"__version__",
]
@language python
@tabwidth -4
""" Main for Jupytext
Call with (e.g.)::
python -m jupytext my_notebook.ipynb --to Rmd
"""
import sys
from .cli import jupytext
if __name__ == "__main__":
sys.exit(jupytext())
@language python
@tabwidth -4
"""Find kernel specifications for a given language"""
import os
import sys
from .languages import same_language
from .reraise import reraise
try:
# I prefer not to take a dependency on jupyter_client
from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec
except ImportError as err:
find_kernel_specs = reraise(err)
get_kernel_spec = reraise(err)
@others
@language python
@tabwidth -4
def set_kernelspec_from_language(notebook):
"""Set the kernel specification based on the 'main_language' metadata"""
language = notebook.metadata.get("jupytext", {}).get("main_language")
if "kernelspec" not in notebook.metadata and language:
try:
kernelspec = kernelspec_from_language(language)
except ValueError:
return
notebook.metadata["kernelspec"] = kernelspec
notebook.metadata.get("jupytext", {}).pop("main_language")
def kernelspec_from_language(language):
"""Return the python kernel that matches the current env, or the first kernel that matches the given language"""
if language == "python":
# Return the kernel that matches the current Python executable
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
cmd = kernel_specs.argv[0]
if (
kernel_specs.language == "python"
and os.path.isfile(cmd)
and os.path.samefile(cmd, sys.executable)
):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError(
"No kernel found that matches the current python executable {}\n".format(
sys.executable
)
+ "Install one with 'python -m ipykernel install --name kernel_name [--user]'"
)
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
if same_language(kernel_specs.language, language):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError(f"No kernel found for the language {language}")
"""Determine notebook or cell language"""
import re
# Jupyter magic commands that are also languages
_JUPYTER_LANGUAGES = [
"R",
"bash",
"sh",
"python",
"python2",
"python3",
"coconut",
"javascript",
"js",
"perl",
"html",
"latex",
"markdown",
"pypy",
"ruby",
"script",
"svg",
"matlab",
"octave",
"idl",
"robotframework",
"sas",
"spark",
"sql",
"cython",
"haskell",
"tcl",
"gnuplot",
"wolfram language",
]
# Supported file extensions (and languages)
# Please add more languages here (and add a few tests) - see contributing.md
_SCRIPT_EXTENSIONS = {
".py": {"language": "python", "comment": "#"},
".coco": {"language": "coconut", "comment": "#"},
".R": {"language": "R", "comment": "#"},
".r": {"language": "R", "comment": "#"},
".jl": {"language": "julia", "comment": "#"},
".cpp": {"language": "c++", "comment": "//"},
".ss": {"language": "scheme", "comment": ";;"},
".clj": {"language": "clojure", "comment": ";;"},
".scm": {"language": "scheme", "comment": ";;"},
".sh": {"language": "bash", "comment": "#"},
".ps1": {"language": "powershell", "comment": "#"},
".q": {"language": "q", "comment": "/"},
".m": {"language": "matlab", "comment": "%"},
# Unfortunately, Wolfram Mathematica also uses the .m extension which
# conflicts with Matlab. To work around this problem we arbitrarily use a
# made-up .wolfram extension.
".wolfram": {
"language": "wolfram language",
"comment": "(*",
"comment_suffix": "*)",
},
".pro": {"language": "idl", "comment": ";"},
".js": {"language": "javascript", "comment": "//"},
".ts": {"language": "typescript", "comment": "//"},
".scala": {"language": "scala", "comment": "//"},
".rs": {"language": "rust", "comment": "//"},
".robot": {"language": "robotframework", "comment": "#"},
".resource": {"language": "robotframework", "comment": "#"},
".cs": {"language": "csharp", "comment": "//"},
".fsx": {"language": "fsharp", "comment": "//"},
".fs": {"language": "fsharp", "comment": "//"},
".sos": {"language": "sos", "comment": "#"},
".java": {"language": "java", "comment": "//"},
".groovy": {"language": "groovy", "comment": "//"},
".sage": {"language": "sage", "comment": "#"},
".ml": {
"language": "ocaml",
"comment": "(*",
"comment_suffix": "*)",
}, # OCaml only has block comments
".hs": {"language": "haskell", "comment": "--"},
".tcl": {"language": "tcl", "comment": "#"},
".mac": {
"language": "maxima",
"comment": "/*",
"comment_suffix": "*/",
}, # Maxima only has block comments
".gp": {"language": "gnuplot", "comment": "#"},
".do": {"language": "stata", "comment": "//"},
".sas": {
"language": "sas",
"comment": "/*",
"comment_suffix": "*/",
},
".xsh": {"language": "xonsh", "comment": "#"},
".lua": {"language": "lua", "comment": "--"},
".go": {"language": "go", "comment": "//"},
}
_COMMENT_CHARS = [
_SCRIPT_EXTENSIONS[ext]["comment"]
for ext in _SCRIPT_EXTENSIONS
if _SCRIPT_EXTENSIONS[ext]["comment"] != "#"
]
_COMMENT = {
_SCRIPT_EXTENSIONS[ext]["language"]: _SCRIPT_EXTENSIONS[ext]["comment"]
for ext in _SCRIPT_EXTENSIONS
}
_JUPYTER_LANGUAGES = (
set(_JUPYTER_LANGUAGES).union(_COMMENT.keys()).union(["c#", "f#", "cs", "fs"])
)
_JUPYTER_LANGUAGES_LOWER_AND_UPPER = _JUPYTER_LANGUAGES.union(
{str.upper(lang) for lang in _JUPYTER_LANGUAGES}
)
_GO_DOUBLE_PERCENT_COMMAND = re.compile(r"^(%%\s*|%%\s+-.*)$")
@others
@language python
@tabwidth -4
def default_language_from_metadata_and_ext(metadata, ext, pop_main_language=False):
"""Return the default language given the notebook metadata, and a file extension"""
default_from_ext = _SCRIPT_EXTENSIONS.get(ext, {}).get("language")
main_language = metadata.get("jupytext", {}).get("main_language")
default_language = (
metadata.get("kernelspec", {}).get("language") or default_from_ext
)
language = main_language or default_language
if (
main_language is not None
and main_language == default_language
and pop_main_language
):
metadata["jupytext"].pop("main_language")
if language is None or language in ["R", "sas"]:
return language
if language.startswith("C++"):
return "c++"
return language.lower().replace("#", "sharp")
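# Minimal illustration (inputs assumed, not part of jupytext): the kernelspec
# language takes precedence over the file extension, and jupytext's
# main_language takes precedence over both.
_example = {"jupytext": {"main_language": "python"}, "kernelspec": {"language": "R"}}
assert default_language_from_metadata_and_ext(_example, ".R") == "python"
assert default_language_from_metadata_and_ext({}, ".cs") == "csharp"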
"""
Convert between text notebook metadata and jupyter cell metadata.
Standard cell metadata are documented here:
https://ipython.org/ipython-doc/3/notebook/nbformat.html#cell-metadata
"""
import ast
import re
from json import dumps, loads
try:
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from .languages import _JUPYTER_LANGUAGES
# Map R Markdown's "echo", "results" and "include" to "hide_input" and "hide_output", which are understood by the
# `runtools` extension for Jupyter notebook, and by nbconvert (use the `hide_input_output.tpl` template).
# See http://jupyter-contrib-nbextensions.readthedocs.io/en/latest/nbextensions/runtools/readme.html
_RMARKDOWN_TO_RUNTOOLS_OPTION_MAP = [
(("include", "FALSE"), [("hide_input", True), ("hide_output", True)]),
(("echo", "FALSE"), [("hide_input", True)]),
(("results", "'hide'"), [("hide_output", True)]),
(("results", '"hide"'), [("hide_output", True)]),
]
# Alternatively, Jupytext can also map the Jupyter Book options to R Markdown
_RMARKDOWN_TO_JUPYTER_BOOK_MAP = [
(("include", "FALSE"), "remove_cell"),
(("echo", "FALSE"), "remove_input"),
(("results", "'hide'"), "remove_output"),
(("results", '"hide"'), "remove_output"),
]
_JUPYTEXT_CELL_METADATA = [
# Pre-jupytext metadata
"skipline",
"noskipline",
# Jupytext metadata
"cell_marker",
"lines_to_next_cell",
"lines_to_end_of_cell_marker",
]
_IGNORE_CELL_METADATA = ",".join(
f"-{name}"
for name in [
# Frequent cell metadata that should not enter the text representation
# (these metadata are preserved in the paired Jupyter notebook).
"autoscroll",
"collapsed",
"scrolled",
"trusted",
"execution",
"ExecuteTime",
]
+ _JUPYTEXT_CELL_METADATA
)
# In R Markdown we might have options without a value
_IS_IDENTIFIER = re.compile(r"^[a-zA-Z_\.]+[a-zA-Z0-9_\.]*$")
_IS_VALID_METADATA_KEY = re.compile(r"^[a-zA-Z0-9_\.-]+$")
@others
@language python
@tabwidth -4
def usual_language_name(language):
"""Return the usual language name (one that may be found in _SCRIPT_EXTENSIONS above)"""
language = language.lower()
if language == "r":
return "R"
if language.startswith("c++"):
return "c++"
if language == "octave":
return "matlab"
if language in ["cs", "c#"]:
return "csharp"
if language in ["fs", "f#"]:
return "fsharp"
if language == "sas":
return "SAS"
return language
def same_language(kernel_language, language):
"""Are those the same language?"""
return usual_language_name(kernel_language) == usual_language_name(language)
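# Quick checks (inputs assumed): language aliases map to one canonical name,
# which is what same_language compares.
assert usual_language_name("C#") == "csharp"
assert same_language("octave", "matlab")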
def set_main_and_cell_language(metadata, cells, ext, custom_cell_magics):
"""Set main language for the given collection of cells, and
use magics for cells that use other languages"""
main_language = default_language_from_metadata_and_ext(metadata, ext)
if main_language is None:
languages = {"python": 0.5}
for cell in cells:
if "language" in cell["metadata"]:
language = usual_language_name(cell["metadata"]["language"])
languages[language] = languages.get(language, 0.0) + 1
main_language = max(languages, key=languages.get)
# save main language when no kernel is set
if "language" not in metadata.get("kernelspec", {}) and cells:
metadata.setdefault("jupytext", {})["main_language"] = main_language
    # Remove the 'language' metadata and add a magic if the cell is not in the main language
for cell in cells:
if "language" in cell["metadata"]:
language = cell["metadata"]["language"]
if language == main_language:
cell["metadata"].pop("language")
continue
if usual_language_name(language) == main_language:
continue
if language in _JUPYTER_LANGUAGES or language in custom_cell_magics:
cell["metadata"].pop("language")
magic = "%%" if main_language != "csharp" else "#!"
if "magic_args" in cell["metadata"]:
magic_args = cell["metadata"].pop("magic_args")
cell["source"] = (
f"{magic}{language} {magic_args}\n" + cell["source"]
)
else:
cell["source"] = f"{magic}{language}\n" + cell["source"]
def cell_language(source, default_language, custom_cell_magics):
"""Return cell language and language options, if any"""
if source:
line = source[0]
if default_language == "go" and _GO_DOUBLE_PERCENT_COMMAND.match(line):
return None, None
if default_language == "csharp":
if line.startswith("#!"):
lang = line[2:].strip()
if lang in _JUPYTER_LANGUAGES:
source.pop(0)
return lang, ""
elif line.startswith("%%"):
magic = line[2:]
if " " in magic:
lang, magic_args = magic.split(" ", 1)
else:
lang = magic
magic_args = ""
if lang in _JUPYTER_LANGUAGES or lang in custom_cell_magics:
source.pop(0)
return lang, magic_args
return None, None
def comment_lines(lines, prefix, suffix=""):
"""Return commented lines"""
if not prefix:
return lines
if not suffix:
return [prefix + " " + line if line else prefix for line in lines]
return [
prefix + " " + line + " " + suffix if line else prefix + " " + suffix
for line in lines
]
"""Escape Jupyter magics when converting to other formats"""
import re
from .languages import _COMMENT, _SCRIPT_EXTENSIONS, usual_language_name
from .stringparser import StringParser
@others
@language python
@tabwidth -4
def get_comment(ext):
return re.escape(_SCRIPT_EXTENSIONS[ext]["comment"])
# A magic expression is a line or cell or metakernel magic (#94, #61) escaped zero, one, or multiple times
_MAGIC_RE = {
_SCRIPT_EXTENSIONS[ext]["language"]: re.compile(
r"^\s*({0} |{0})*(%|%%|%%%)[a-zA-Z]".format(get_comment(ext))
)
for ext in _SCRIPT_EXTENSIONS
}
_MAGIC_FORCE_ESC_RE = {
_SCRIPT_EXTENSIONS[ext]["language"]: re.compile(
r"^\s*({0} |{0})*(%|%%|%%%)[a-zA-Z](.*){0}\s*escape".format(get_comment(ext))
)
for ext in _SCRIPT_EXTENSIONS
}
_MAGIC_NOT_ESC_RE = {
_SCRIPT_EXTENSIONS[ext]["language"]: re.compile(
r"^\s*({0} |{0})*(%|%%|%%%)[a-zA-Z](.*){0}\s*noescape".format(get_comment(ext))
)
for ext in _SCRIPT_EXTENSIONS
}
_LINE_CONTINUATION_RE = re.compile(r".*\\\s*$")
# Rust magics start with single ':' #351
_MAGIC_RE["rust"] = re.compile(r"^(// |//)*:[a-zA-Z]")
_MAGIC_FORCE_ESC_RE["rust"] = re.compile(r"^(// |//)*:[a-zA-Z](.*)//\s*escape")
_MAGIC_NOT_ESC_RE["rust"] = re.compile(r"^(// |//)*:[a-zA-Z](.*)//\s*noescape")
# C# magics start with '#!'
_MAGIC_RE["csharp"] = re.compile(r"^(// |//)*#![a-zA-Z]")
_MAGIC_FORCE_ESC_RE["csharp"] = re.compile(r"^(// |//)*#![a-zA-Z](.*)//\s*escape")
_MAGIC_NOT_ESC_RE["csharp"] = re.compile(r"^(// |//)*#![a-zA-Z](.*)//\s*noescape")
# Go magics might start with % or ! or !*
# (in addition, Go NB might use %% or %% -, see "_GO_DOUBLE_PERCENT_COMMAND")
_MAGIC_RE["go"] = re.compile(r"^(// |//)*(!|!\*|%|%%|%%%)[a-zA-Z]")
# Commands starting with a question or exclamation mark have to be escaped
_PYTHON_HELP_OR_BASH_CMD = re.compile(r"^\s*(# |#)*\s*(\?|!)\s*[A-Za-z\.\~\$\\\/\{\}]")
# A bash command not followed by an equal sign or a parenthesis is a magic command
_PYTHON_MAGIC_CMD = re.compile(
r"^(# |#)*({})($|\s$|\s[^=,])".format(
"|".join(
# posix
["cat", "cd", "cp", "mv", "rm", "rmdir", "mkdir"]
+ # noqa: W504
# windows
["copy", "ddir", "echo", "ls", "ldir", "mkdir", "ren", "rmdir"]
)
)
)
# Python help commands end with ?
_IPYTHON_MAGIC_HELP = re.compile(r"^\s*(# )*[^\s]*\?\s*$")
_PYTHON_MAGIC_ASSIGN = re.compile(
r"^(# |#)*\s*([a-zA-Z_][a-zA-Z_$0-9]*)\s*=\s*(%|%%|%%%|!)[a-zA-Z](.*)"
)
_SCRIPT_LANGUAGES = [_SCRIPT_EXTENSIONS[ext]["language"] for ext in _SCRIPT_EXTENSIONS]
def is_magic(line, language, global_escape_flag=True, explicitly_code=False):
"""Is the current line a (possibly escaped) Jupyter magic, and should it be commented?"""
language = usual_language_name(language)
if language in ["octave", "matlab", "sas"] or language not in _SCRIPT_LANGUAGES:
return False
if _MAGIC_FORCE_ESC_RE[language].match(line):
return True
if not global_escape_flag or _MAGIC_NOT_ESC_RE[language].match(line):
return False
if _MAGIC_RE[language].match(line):
return True
if language != "python":
return False
if _PYTHON_HELP_OR_BASH_CMD.match(line):
return True
if _PYTHON_MAGIC_ASSIGN.match(line):
return True
if explicitly_code and _IPYTHON_MAGIC_HELP.match(line):
return True
return _PYTHON_MAGIC_CMD.match(line)
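# Illustrative checks: a line magic is detected, a plain assignment is not.
assert is_magic("%matplotlib inline", "python")
assert not is_magic("x = 1", "python")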
def need_explicit_marker(
source, language="python", global_escape_flag=True, explicitly_code=True
):
    """Does this code need an explicit cell marker?"""
if language != "python" or not global_escape_flag or not explicitly_code:
return False
parser = StringParser(language)
for line in source:
if not parser.is_quoted() and is_magic(
line, language, global_escape_flag, explicitly_code
):
if not is_magic(line, language, global_escape_flag, False):
return True
parser.read_line(line)
return False
def comment_magic(
source, language="python", global_escape_flag=True, explicitly_code=True
):
"""Escape Jupyter magics with '# '"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (
next_is_magic
or is_magic(line, language, global_escape_flag, explicitly_code)
):
if next_is_magic:
# this is the continuation line of a magic command on the previous line,
# so we don't want to indent the comment
unindented = line
indent = ""
else:
unindented = line.lstrip()
indent = line[: len(line) - len(unindented)]
source[pos] = indent + _COMMENT[language] + " " + unindented
next_is_magic = language == "python" and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source
class RLogicalValueError(Exception):
"""Incorrect value for R boolean"""
def unesc(line, language):
    """Remove one level of comment from a commented line"""
comment = _COMMENT[language]
unindented = line.lstrip()
indent = line[: len(line) - len(unindented)]
if unindented.startswith(comment + " "):
return indent + unindented[len(comment) + 1 :]
if unindented.startswith(comment):
return indent + unindented[len(comment) :]
return line
def uncomment_magic(
source, language="python", global_escape_flag=True, explicitly_code=True
):
"""Unescape Jupyter magics"""
parser = StringParser(language)
next_is_magic = False
for pos, line in enumerate(source):
if not parser.is_quoted() and (
next_is_magic
or is_magic(line, language, global_escape_flag, explicitly_code)
):
source[pos] = unesc(line, language)
next_is_magic = language == "python" and _LINE_CONTINUATION_RE.match(line)
parser.read_line(line)
return source
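# Round-trip sketch (input assumed): comment_magic escapes a magic with the
# language's comment character, and uncomment_magic restores it. Both mutate
# the list they are given, hence the copies.
_src = ["%load_ext autoreload"]
assert comment_magic(list(_src)) == ["# %load_ext autoreload"]
assert uncomment_magic(comment_magic(list(_src))) == _src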
_ESCAPED_CODE_START = {
".Rmd": re.compile(r"^(# |#)*```{.*}"),
".md": re.compile(r"^(# |#)*```"),
".markdown": re.compile(r"^(# |#)*```"),
}
_ESCAPED_CODE_START.update(
{
ext: re.compile(r"^({0} |{0})*({0}|{0} )\+".format(get_comment(ext)))
for ext in _SCRIPT_EXTENSIONS
}
)
def is_escaped_code_start(line, ext):
"""Is the current line a possibly commented code start marker?"""
return _ESCAPED_CODE_START[ext].match(line)
def escape_code_start(source, ext, language="python"):
"""Escape code start with '# '"""
parser = StringParser(language)
for pos, line in enumerate(source):
if not parser.is_quoted() and is_escaped_code_start(line, ext):
source[pos] = (
_SCRIPT_EXTENSIONS.get(ext, {}).get("comment", "#") + " " + line
)
parser.read_line(line)
return source
def unescape_code_start(source, ext, language="python"):
"""Unescape code start"""
parser = StringParser(language)
for pos, line in enumerate(source):
if not parser.is_quoted() and is_escaped_code_start(line, ext):
unescaped = unesc(line, language)
# don't remove comment char if we break the code start...
if is_escaped_code_start(unescaped, ext):
source[pos] = unescaped
parser.read_line(line)
return source
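# Sketch (inputs assumed): a Markdown fence inside a cell is escaped so it does
# not close the cell early, and unescaped on the way back.
assert escape_code_start(["```python"], ".md") == ["# ```python"]
assert unescape_code_start(["# ```python"], ".md") == ["```python"]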
"""Notebook and cell metadata filtering"""
from copy import copy
from .cell_metadata import _JUPYTEXT_CELL_METADATA, is_valid_metadata_key
_DEFAULT_NOTEBOOK_METADATA = ",".join(
[
# Preserve Jupytext section
"jupytext",
# Preserve kernel specs
"kernelspec",
# Kernel_info found in Nteract notebooks
"kernel_info",
# Used in MyST notebooks
"orphan",
"tocdepth",
]
)
@others
@language python
@tabwidth -4
def metadata_filter_as_dict(metadata_config):
"""Return the metadata filter represented as either None (no filter),
or a dictionary with at most two keys: 'additional' and 'excluded',
which contain either a list of metadata names, or the string 'all'"""
if metadata_config is None:
return {}
if metadata_config is True:
return {"additional": "all"}
if metadata_config is False:
return {"excluded": "all"}
if isinstance(metadata_config, dict):
assert set(metadata_config) <= {"additional", "excluded"}
return metadata_config
metadata_keys = metadata_config.split(",")
metadata_config = {}
for key in metadata_keys:
key = key.strip()
if key.startswith("-"):
metadata_config.setdefault("excluded", []).append(key[1:].strip())
elif key.startswith("+"):
metadata_config.setdefault("additional", []).append(key[1:].strip())
else:
metadata_config.setdefault("additional", []).append(key)
for section in metadata_config:
if "all" in metadata_config[section]:
metadata_config[section] = "all"
else:
metadata_config[section] = [k for k in metadata_config[section] if k]
return metadata_config
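# Sketch of the string form (filter string assumed): a bare name adds a key,
# '-' excludes one, and 'all'/'-all' act as wildcards.
assert metadata_filter_as_dict("tags,-all") == {"additional": ["tags"], "excluded": "all"}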
def metadata_filter_as_string(metadata_filter):
"""Convert a filter, represented as a dictionary with 'additional' and 'excluded' entries, to a string"""
if not isinstance(metadata_filter, dict):
return metadata_filter
additional = metadata_filter.get("additional", [])
if additional == "all":
entries = ["all"]
else:
entries = [key for key in additional if key not in _JUPYTEXT_CELL_METADATA]
excluded = metadata_filter.get("excluded", [])
if excluded == "all":
entries.append("-all")
else:
entries.extend(["-" + e for e in excluded])
return ",".join(entries)
def update_metadata_filters(metadata, jupyter_md, cell_metadata):
"""Update or set the notebook and cell metadata filters"""
if not jupyter_md:
# Set a metadata filter equal to the current metadata in script
metadata.setdefault("jupytext", {})["notebook_metadata_filter"] = "-all"
metadata["jupytext"].setdefault(
"cell_metadata_filter",
metadata_filter_as_string({"additional": cell_metadata, "excluded": "all"}),
)
elif "cell_metadata_filter" in metadata.get("jupytext", {}):
# Update the existing metadata filter
metadata_filter = metadata_filter_as_dict(
metadata.get("jupytext", {})["cell_metadata_filter"]
)
if isinstance(metadata_filter.get("excluded"), list):
metadata_filter["excluded"] = [
key for key in metadata_filter["excluded"] if key not in cell_metadata
]
metadata_filter.setdefault("additional", [])
if isinstance(metadata_filter.get("additional"), list):
for key in cell_metadata:
if key not in metadata_filter["additional"]:
metadata_filter["additional"].append(key)
metadata.setdefault("jupytext", {})[
"cell_metadata_filter"
] = metadata_filter_as_string(metadata_filter)
else:
        # Update the notebook metadata filter to include existing entries #376
nb_md_filter = (
metadata.get("jupytext", {}).get("notebook_metadata_filter", "").split(",")
)
nb_md_filter = [key for key in nb_md_filter if key]
if "all" in nb_md_filter or "-all" in nb_md_filter:
return
for key in metadata:
if (
key in _DEFAULT_NOTEBOOK_METADATA.split(",")
or key in nb_md_filter
or ("-" + key) in nb_md_filter
):
continue
nb_md_filter.append(key)
if nb_md_filter:
metadata.setdefault("jupytext", {})["notebook_metadata_filter"] = ",".join(
nb_md_filter
)
def filter_metadata(metadata, user_filter, default_filter="", unsupported_keys=None):
"""Filter the cell or notebook metadata, according to the user preference"""
default_filter = metadata_filter_as_dict(default_filter) or {}
user_filter = metadata_filter_as_dict(user_filter) or {}
default_exclude = default_filter.get("excluded", [])
default_include = default_filter.get("additional", [])
assert not (default_exclude == "all" and default_include == "all")
if isinstance(default_include, list) and default_include and default_exclude == []:
default_exclude = "all"
user_exclude = user_filter.get("excluded", [])
user_include = user_filter.get("additional", [])
    # notebook default filter = include only a few metadata
if default_exclude == "all":
if user_include == "all":
return subset_metadata(
metadata, exclude=user_exclude, unsupported_keys=unsupported_keys
)
if user_exclude == "all":
return subset_metadata(
metadata, keep_only=user_include, unsupported_keys=unsupported_keys
)
return subset_metadata(
metadata,
keep_only=set(user_include).union(default_include),
exclude=user_exclude,
unsupported_keys=unsupported_keys,
)
    # cell default filter = all metadata but the removed ones
if user_include == "all":
return subset_metadata(
metadata, exclude=user_exclude, unsupported_keys=unsupported_keys
)
if user_exclude == "all":
return subset_metadata(
metadata, keep_only=user_include, unsupported_keys=unsupported_keys
)
# Do not serialize empty tags
if "tags" in metadata and not metadata["tags"]:
metadata = metadata.copy()
metadata.pop("tags")
return subset_metadata(
metadata,
exclude=set(user_exclude).union(set(default_exclude).difference(user_include)),
unsupported_keys=unsupported_keys,
)
class RMarkdownOptionParsingError(Exception):
"""Error when parsing Rmd cell options"""
def second_level(keys):
"""Return a dictionary with the nested keys, e.g. returns {'I':['a', 'b']} when keys=['I.a', 'I.b']"""
sub_keys = {}
for key in keys:
if "." in key:
left, right = key.split(".", 1)
sub_keys.setdefault(left, []).append(right)
return sub_keys
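# Example (keys assumed): nested keys are grouped under their first component.
assert second_level(["jupytext.formats", "jupytext.main_language", "orphan"]) == {
    "jupytext": ["formats", "main_language"]
}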
def suppress_unsupported_keys(metadata, unsupported_keys=None):
    """Remove metadata keys that are not valid; collect them in 'unsupported_keys' when it is a set"""
if isinstance(unsupported_keys, set):
for key in metadata:
if not is_valid_metadata_key(key):
unsupported_keys.add(key)
return {key: value for key, value in metadata.items() if is_valid_metadata_key(key)}
def subset_metadata(metadata, keep_only=None, exclude=None, unsupported_keys=None):
"""Filter the metadata"""
metadata = suppress_unsupported_keys(metadata, unsupported_keys=unsupported_keys)
if keep_only is not None:
filtered_metadata = {key: metadata[key] for key in metadata if key in keep_only}
sub_keep_only = second_level(keep_only)
for key in sub_keep_only:
if key in metadata:
filtered_metadata[key] = subset_metadata(
metadata[key],
keep_only=sub_keep_only[key],
unsupported_keys=unsupported_keys,
)
else:
filtered_metadata = copy(metadata)
if exclude is not None:
for key in exclude:
if key in filtered_metadata:
filtered_metadata.pop(key)
sub_exclude = second_level(exclude)
for key in sub_exclude:
if key in filtered_metadata:
filtered_metadata[key] = subset_metadata(
filtered_metadata[key],
exclude=sub_exclude[key],
unsupported_keys=unsupported_keys,
)
return filtered_metadata
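# Nested-filter sketch (metadata values assumed): keep only kernelspec.name.
_meta = {"kernelspec": {"name": "python3", "display_name": "Python 3"}, "orphan": True}
assert subset_metadata(_meta, keep_only=["kernelspec.name"]) == {
    "kernelspec": {"name": "python3"}
}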
def restore_filtered_metadata(
filtered_metadata, unfiltered_metadata, user_filter, default_filter
):
"""Update the filtered metadata with the part of the unfiltered one that matches the filter"""
filtered_unfiltered_metadata = filter_metadata(
unfiltered_metadata, user_filter, default_filter
)
metadata = copy(filtered_metadata)
for key in unfiltered_metadata:
if key not in filtered_unfiltered_metadata:
# We don't want to restore the line_to_next_cell metadata from the ipynb file, see #761
if key not in _JUPYTEXT_CELL_METADATA:
metadata[key] = unfiltered_metadata[key]
return metadata
"""
This module contains round-trip conversion between
myst formatted text documents and notebooks.
"""
import json
import re
import warnings
from textwrap import dedent
import nbformat as nbf
import yaml
from .cell_to_text import three_backticks_or_more
try:
from markdown_it import MarkdownIt
from mdit_py_plugins.front_matter import front_matter_plugin
from mdit_py_plugins.myst_blocks import myst_block_plugin
from mdit_py_plugins.myst_role import myst_role_plugin
except ImportError:
MarkdownIt = None
MYST_FORMAT_NAME = "myst"
CODE_DIRECTIVE = "{code-cell}"
RAW_DIRECTIVE = "{raw-cell}"
@others
@language python
@tabwidth -4
def is_myst_available():
"""Whether the markdown-it-py package is available."""
return MarkdownIt is not None
def raise_if_myst_is_not_available():
if not is_myst_available():
raise ImportError(
"The MyST Markdown format requires python >= 3.6 and markdown-it-py~=1.0"
)
def myst_version():
"""The version of myst."""
return 0.13
def myst_extensions(no_md=False):
"""The allowed extensions for the myst format."""
if no_md:
return [".myst", ".mystnb", ".mnb"]
return [".md", ".myst", ".mystnb", ".mnb"]
def get_parser():
"""Return the markdown-it parser to use."""
parser = (
MarkdownIt("commonmark")
.enable("table")
.use(front_matter_plugin)
.use(myst_block_plugin)
.use(myst_role_plugin)
# we only need to parse block level components (for efficiency)
.disable("inline", True)
)
return parser
def _py_logical_values(rbool):
if rbool in ["TRUE", "T"]:
return True
if rbool in ["FALSE", "F"]:
return False
raise RLogicalValueError
def matches_mystnb(
text,
ext=None,
requires_meta=True,
code_directive=CODE_DIRECTIVE,
raw_directive=RAW_DIRECTIVE,
):
"""Attempt to distinguish a file as myst, only given its extension and content.
:param ext: the extension of the file
:param requires_meta: requires the file to contain top matter metadata
:param code_directive: the name of the directive to search for containing code cells
:param raw_directive: the name of the directive to search for containing raw cells
"""
# is the extension uniquely associated with myst (i.e. not just .md)
if ext and "." + ("." + ext).rsplit(".", 1)[1] in myst_extensions(no_md=True):
return True
# might the text contain metadata front matter
if requires_meta and not text.startswith("---"):
return False
try:
tokens = get_parser().parse(text + "\n")
except (TypeError, ValueError) as err:
warnings.warn(f"myst-parser failed unexpectedly: {err}") # pragma: no cover
return False
# Is the format information available in the jupytext text representation?
if tokens and tokens[0].type == "front_matter":
try:
metadata = yaml.safe_load(tokens[0].content)
except (yaml.parser.ParserError, yaml.scanner.ScannerError):
pass
else:
try:
if (
metadata.get("jupytext", {})
.get("text_representation", {})
.get("format_name", "")
== MYST_FORMAT_NAME
):
return True
except AttributeError:
pass
    # is there at least one fenced code block with a code/raw directive language
for token in tokens:
if token.type == "fence" and (
token.info.startswith(code_directive)
or token.info.startswith(raw_directive)
):
return True
return False
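# Illustrative check (assumes markdown-it-py is installed): front matter that
# declares format_name 'myst' is enough to identify the file.
_text = (
    "---\n"
    "jupytext:\n"
    "  text_representation:\n"
    "    format_name: myst\n"
    "---\n"
)
assert matches_mystnb(_text)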
class CompactDumper(yaml.SafeDumper):
"""This YAML dumper creates a more compact style for lists"""
def represent_list(self, data):
"""Compact lists"""
flow_style = not any(isinstance(i, dict) for i in data)
return self.represent_sequence("tag:yaml.org,2002:seq", data, flow_style=flow_style)
def represent_dict(self, data):
"""Compact dicts"""
return self.represent_mapping("tag:yaml.org,2002:map", data, flow_style=False)
CompactDumper.add_representer(list, represent_list)
CompactDumper.add_representer(dict, represent_dict)
def dump_yaml_blocks(data, compact=True):
"""Where possible, we try to use a more compact metadata style.
For blocks with no nested dicts, the block is denoted by starting colons::
:other: true
:tags: [hide-output, show-input]
    For blocks with nesting, the block is enclosed by ``---``::
---
other:
more: true
tags: [hide-output, show-input]
---
"""
string = yaml.dump(data, Dumper=CompactDumper)
lines = string.splitlines()
if compact and all(line and line[0].isalpha() for line in lines):
return "\n".join([f":{line}" for line in lines]) + "\n\n"
return f"---\n{string}---\n"
def from_nbnode(value):
"""Recursively convert NotebookNode to dict."""
if isinstance(value, nbf.NotebookNode):
return {k: from_nbnode(v) for k, v in value.items()}
return value
class MystMetadataParsingError(Exception):
"""Error when parsing metadata from myst formatted text"""
def strip_blank_lines(text):
"""Remove initial blank lines"""
text = text.rstrip()
while text and text.startswith("\n"):
text = text[1:]
return text
def read_fenced_cell(token, cell_index, cell_type):
"""Parse (and validate) the full directive text."""
content = token.content
error_msg = "{} cell {} at line {} could not be read: ".format(
cell_type, cell_index, token.map[0] + 1
)
body_lines, options = parse_directive_options(content, error_msg)
# remove first line of body if blank
# this is to allow space between the options and the content
if body_lines and not body_lines[0].strip():
body_lines = body_lines[1:]
return options, body_lines
def parse_directive_options(content, error_msg):
"""Parse (and validate) the directive option section."""
options = {}
if content.startswith("---"):
content = "\n".join(content.splitlines()[1:])
match = re.search(r"^-{3,}", content, re.MULTILINE)
if match:
yaml_block = content[: match.start()]
content = content[match.end() + 1 :]
else:
yaml_block = content
content = ""
yaml_block = dedent(yaml_block)
try:
options = yaml.safe_load(yaml_block) or {}
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error:
raise MystMetadataParsingError(error_msg + "Invalid YAML; " + str(error))
elif content.lstrip().startswith(":"):
content_lines = content.splitlines() # type: list
yaml_lines = []
while content_lines:
if not content_lines[0].lstrip().startswith(":"):
break
yaml_lines.append(content_lines.pop(0).lstrip()[1:])
yaml_block = "\n".join(yaml_lines)
content = "\n".join(content_lines)
try:
options = yaml.safe_load(yaml_block) or {}
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error:
raise MystMetadataParsingError(error_msg + "Invalid YAML; " + str(error))
return content.splitlines(), options
@language rust
// Characters, length 3.
a = 'x'
b = 'é'
// Characters, length 4.
nl = '\n'
null = '\0'
tab = '\t'
ret = '\r'
bs = '\\'
// Characters, length 10 and 8.
char10 = '\u{7fff}'
char8 = '\x7f'
// Non characters.
other 'xx
bad_escape '\y'
// Lifetimes.
pub fn <'a>:
def metadata_to_rmd_options(language, metadata, use_runtools=False):
"""Convert language and metadata information to their rmd representation"""
options = (language or "R").lower()
if "name" in metadata:
options += " " + metadata["name"] + ","
del metadata["name"]
if use_runtools:
for rmd_option, jupyter_options in _RMARKDOWN_TO_RUNTOOLS_OPTION_MAP:
if all(
[
metadata.get(opt_name) == opt_value
for opt_name, opt_value in jupyter_options
]
):
options += " {}={},".format(
rmd_option[0], "FALSE" if rmd_option[1] is False else rmd_option[1]
)
for opt_name, _ in jupyter_options:
metadata.pop(opt_name)
else:
for rmd_option, tag in _RMARKDOWN_TO_JUPYTER_BOOK_MAP:
if tag in metadata.get("tags", []):
options += " {}={},".format(
rmd_option[0], "FALSE" if rmd_option[1] is False else rmd_option[1]
)
metadata["tags"] = [i for i in metadata["tags"] if i != tag]
if not metadata["tags"]:
metadata.pop("tags")
for opt_name in metadata:
opt_value = metadata[opt_name]
opt_name = opt_name.strip()
if opt_name == "active":
options += f' {opt_name}="{str(opt_value)}",'
elif isinstance(opt_value, bool):
options += " {}={},".format(opt_name, "TRUE" if opt_value else "FALSE")
elif isinstance(opt_value, list):
options += " {}={},".format(
opt_name,
"c({})".format(", ".join([f'"{str(v)}"' for v in opt_value])),
)
elif isinstance(opt_value, str):
if opt_value.startswith("#R_CODE#"):
options += f" {opt_name}={opt_value[8:]},"
elif '"' not in opt_value:
options += f' {opt_name}="{opt_value}",'
else:
options += f" {opt_name}='{opt_value}',"
else:
options += f" {opt_name}={str(opt_value)},"
if not language:
options = options[2:]
return options.strip(",").strip()
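# Sketch (metadata assumed): the cell name and boolean options become Rmd
# chunk options; note that the metadata dict is modified in place.
assert metadata_to_rmd_options("python", {"name": "setup", "echo": True}) == (
    "python setup, echo=TRUE"
)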
def read_cell_metadata(token, cell_index):
"""Return cell metadata"""
metadata = {}
if token.content:
try:
metadata = json.loads(token.content.strip())
except Exception as err:
raise MystMetadataParsingError(
"Markdown cell {} at line {} could not be read: {}".format(
cell_index, token.map[0] + 1, err
)
)
if not isinstance(metadata, dict):
raise MystMetadataParsingError(
"Markdown cell {} at line {} is not a dict".format(
cell_index, token.map[0] + 1
)
)
return metadata
def myst_to_notebook(
text,
code_directive=CODE_DIRECTIVE,
raw_directive=RAW_DIRECTIVE,
add_source_map=False,
):
"""Convert text written in the myst format to a notebook.
:param text: the file text
:param code_directive: the name of the directive to search for containing code cells
:param raw_directive: the name of the directive to search for containing raw cells
:param add_source_map: add a `source_map` key to the notebook metadata,
which is a list of the starting source line number for each cell.
:raises MystMetadataParsingError if the metadata block is not valid JSON/YAML
NOTE: we assume here that all of these directives are at the top-level,
i.e. not nested in other directives.
"""
raise_if_myst_is_not_available()
tokens = get_parser().parse(text + "\n")
lines = text.splitlines()
md_start_line = 0
# get the document metadata
metadata_nb = {}
if tokens and tokens[0].type == "front_matter":
metadata = tokens.pop(0)
md_start_line = metadata.map[1]
try:
metadata_nb = yaml.safe_load(metadata.content)
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error:
raise MystMetadataParsingError(f"Notebook metadata: {error}")
# create an empty notebook
nbf_version = nbf.v4
kwargs = {"metadata": nbf.from_dict(metadata_nb)}
notebook = nbf_version.new_notebook(**kwargs)
source_map = [] # this is a list of the starting line number for each cell
    def _flush_markdown(start_line, token, md_metadata):
        """When we find a cell we check if there is preceding text."""
endline = token.map[0] if token else len(lines)
md_source = strip_blank_lines("\n".join(lines[start_line:endline]))
meta = nbf.from_dict(md_metadata)
if md_source:
source_map.append(start_line)
notebook.cells.append(
nbf_version.new_markdown_cell(source=md_source, metadata=meta)
)
# iterate through the tokens to identify notebook cells
nesting_level = 0
md_metadata = {}
for token in tokens:
nesting_level += token.nesting
if nesting_level != 0:
            # we ignore fenced blocks that are nested, e.g. as part of lists, etc
continue
if token.type == "fence" and token.info.startswith(code_directive):
_flush_markdown(md_start_line, token, md_metadata)
options, body_lines = read_fenced_cell(token, len(notebook.cells), "Code")
meta = nbf.from_dict(options)
source_map.append(token.map[0] + 1)
notebook.cells.append(
nbf_version.new_code_cell(source="\n".join(body_lines), metadata=meta)
)
md_metadata = {}
md_start_line = token.map[1]
elif token.type == "fence" and token.info.startswith(raw_directive):
_flush_markdown(md_start_line, token, md_metadata)
options, body_lines = read_fenced_cell(token, len(notebook.cells), "Raw")
meta = nbf.from_dict(options)
source_map.append(token.map[0] + 1)
notebook.cells.append(
nbf_version.new_raw_cell(source="\n".join(body_lines), metadata=meta)
)
md_metadata = {}
md_start_line = token.map[1]
elif token.type == "myst_block_break":
_flush_markdown(md_start_line, token, md_metadata)
md_metadata = read_cell_metadata(token, len(notebook.cells))
md_start_line = token.map[1]
_flush_markdown(md_start_line, None, md_metadata)
if add_source_map:
notebook.metadata["source_map"] = source_map
return notebook
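# Usage sketch (assumes markdown-it-py is installed): a single {code-cell}
# fence becomes one code cell.
_nb = myst_to_notebook("```{code-cell}\nprint('hi')\n```")
assert _nb.cells[0].cell_type == "code"
assert _nb.cells[0].source == "print('hi')"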
def notebook_to_myst(
nb,
code_directive=CODE_DIRECTIVE,
raw_directive=RAW_DIRECTIVE,
default_lexer=None,
):
"""Parse a notebook to a MyST formatted text document.
:param nb: the notebook to parse
:param code_directive: the name of the directive to use for code cells
:param raw_directive: the name of the directive to use for raw cells
:param default_lexer: a lexer name to use for annotating code cells
(if ``nb.metadata.language_info.pygments_lexer`` is not available)
"""
raise_if_myst_is_not_available()
string = ""
nb_metadata = from_nbnode(nb.metadata)
# we add the pygments lexer as a directive argument, for use by syntax highlighters
pygments_lexer = nb_metadata.get("language_info", {}).get("pygments_lexer", None)
if pygments_lexer is None:
pygments_lexer = default_lexer
if nb_metadata:
string += dump_yaml_blocks(nb_metadata, compact=False)
last_cell_md = False
for i, cell in enumerate(nb.cells):
if cell.cell_type == "markdown":
metadata = from_nbnode(cell.metadata)
if metadata or last_cell_md:
if metadata:
string += f"\n+++ {json.dumps(metadata)}\n"
else:
string += "\n+++\n"
string += "\n" + cell.source
if not cell.source.endswith("\n"):
string += "\n"
last_cell_md = True
elif cell.cell_type in ["code", "raw"]:
cell_delimiter = three_backticks_or_more(cell.source.splitlines())
string += "\n{}{}".format(
cell_delimiter,
code_directive if cell.cell_type == "code" else raw_directive,
)
if pygments_lexer and cell.cell_type == "code":
string += f" {pygments_lexer}"
string += "\n"
metadata = from_nbnode(cell.metadata)
if metadata:
string += dump_yaml_blocks(metadata)
elif cell.source.startswith("---") or cell.source.startswith(":"):
string += "\n"
string += cell.source
if not cell.source.endswith("\n"):
string += "\n"
string += cell_delimiter + "\n"
last_cell_md = False
else:
raise NotImplementedError(f"cell {i}, type: {cell.cell_type}")
return string.rstrip() + "\n"
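# Inverse-direction sketch (nbformat objects assumed): a one-cell notebook
# renders with a {code-cell} fence.
_nb = nbf.v4.new_notebook(cells=[nbf.v4.new_code_cell("1 + 1")])
assert "{code-cell}" in notebook_to_myst(_nb)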
"""List all the paths associated to a given notebook"""
import os
from .config import find_jupytext_configuration_file
from .formats import (
NOTEBOOK_EXTENSIONS,
long_form_multiple_formats,
long_form_one_format,
short_form_multiple_formats,
short_form_one_format,
)
@others
@language python
@tabwidth -4
class InconsistentPath(ValueError):
"""An exception raised when the path of a notebook is not consistent with the jupytext.format
information it contains"""
def split(path, sep):
if sep not in path:
return "", path
return path.rsplit(sep, 1)
def join(left, right, sep):
if left:
return left + sep + right
return right
def separator(path):
"""Return the local path separator (always / in the contents manager)"""
if os.path.sep == "\\" and "\\" in path:
return "\\"
return "/"
def base_path(main_path, fmt, formats=None):
"""Given a path and options for a format (ext, suffix, prefix), return the corresponding base path"""
fmt = long_form_one_format(fmt)
base, ext = os.path.splitext(main_path)
if "extension" not in fmt:
fmt["extension"] = ext
if ext not in NOTEBOOK_EXTENSIONS:
raise InconsistentPath(
"'{}' is not a notebook. Supported extensions are '{}'.".format(
main_path, "', '".join(NOTEBOOK_EXTENSIONS)
)
)
if ext != fmt["extension"]:
raise InconsistentPath(
"Notebook path '{}' was expected to have extension '{}'".format(
main_path, fmt["extension"]
)
)
# Is there a format that matches the main path?
if formats is None:
formats = [fmt]
for f in formats:
if f["extension"] != fmt["extension"]:
continue
if (
"format_name" in fmt
and "format_name" in f
and f["format_name"] != fmt["format_name"]
):
continue
# extend 'fmt' with the format information (prefix, suffix) from f
fmt = {key: fmt.get(key, value) for key, value in f.items()}
break
suffix = fmt.get("suffix")
prefix = fmt.get("prefix")
if suffix:
if not base.endswith(suffix):
raise InconsistentPath(
"Notebook name '{}' was expected to end with suffix '{}'".format(
base, suffix
)
)
base = base[: -len(suffix)]
if not prefix:
return base
if "//" in prefix:
prefix_root, prefix = prefix.rsplit("//", 1)
else:
prefix_root = ""
sep = separator(base)
notebook_dir, notebook_file_name = split(base, sep)
prefix_dir, prefix_file_name = split(prefix, "/")
base_dir = None
config_file = find_jupytext_configuration_file(notebook_dir)
if config_file:
config_file_dir = os.path.dirname(config_file)
if notebook_dir.startswith(config_file_dir):
base_dir = config_file_dir
notebook_dir = notebook_dir[len(config_file_dir) :]
if prefix_file_name:
if not notebook_file_name.startswith(prefix_file_name):
raise InconsistentPath(
"Notebook name '{}' was expected to start with prefix '{}'".format(
notebook_file_name, prefix_file_name
)
)
notebook_file_name = notebook_file_name[len(prefix_file_name) :]
if prefix_dir:
parent_notebook_dir = notebook_dir
parent_prefix_dir = prefix_dir
actual_folders = list()
while parent_prefix_dir:
parent_prefix_dir, expected_folder = split(parent_prefix_dir, "/")
if expected_folder == "..":
if not actual_folders:
raise InconsistentPath(
"Notebook directory '{}' does not match prefix '{}'".format(
notebook_dir, prefix_dir
)
)
parent_notebook_dir = join(
parent_notebook_dir, actual_folders.pop(), sep
)
else:
parent_notebook_dir, actual_folder = split(parent_notebook_dir, sep)
actual_folders.append(actual_folder)
if actual_folder != expected_folder:
raise InconsistentPath(
"Notebook directory '{}' does not match prefix '{}'".format(
notebook_dir, prefix_dir
)
)
notebook_dir = parent_notebook_dir
if prefix_root:
long_prefix_root = sep + prefix_root + sep
long_notebook_dir = sep + notebook_dir + sep
if long_prefix_root not in long_notebook_dir:
raise InconsistentPath(
"Notebook directory '{}' does not match prefix root '{}'".format(
notebook_dir, prefix_root
)
)
left, right = long_notebook_dir.rsplit(long_prefix_root, 1)
notebook_dir = left + sep + "//" + right
        # We are going to remove the last char, but we need to insert it back at the end...
if not right:
sep = notebook_dir[-1]
notebook_dir = notebook_dir[len(sep) : -len(sep)]
if base_dir:
notebook_dir = base_dir + notebook_dir
if not notebook_dir:
return notebook_file_name
return notebook_dir + sep + notebook_file_name
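# Illustrative calls (paths assumed): base_path strips the extension, then the
# format's suffix, from the notebook path.
assert base_path("notebook.ipynb", {"extension": ".ipynb"}) == "notebook"
assert base_path("notebook.light.py", {"extension": ".py", "suffix": ".light"}) == "notebook"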
def full_path(base, fmt):
"""Return the full path for the notebook, given the base path"""
ext = fmt["extension"]
suffix = fmt.get("suffix")
prefix = fmt.get("prefix")
full = base
if prefix:
if "//" in prefix:
prefix_root, prefix = prefix.rsplit("//", 1)
else:
prefix_root = ""
prefix_dir, prefix_file_name = split(prefix, "/")
# Local path separator (\\ on windows)
sep = separator(base)
prefix_dir = prefix_dir.replace("/", sep)
if (prefix_root != "") != ("//" in base):
raise InconsistentPath(
"Notebook base name '{}' is not compatible with fmt={}. Make sure you use prefix roots "
"in either none, or all of the paired formats".format(
base, short_form_one_format(fmt)
)
)
if prefix_root:
left, right = base.rsplit("//", 1)
right_dir, notebook_file_name = split(right, sep)
notebook_dir = left + prefix_root + sep + right_dir
else:
notebook_dir, notebook_file_name = split(base, sep)
if prefix_file_name:
notebook_file_name = prefix_file_name + notebook_file_name
if prefix_dir:
dotdot = ".." + sep
while prefix_dir.startswith(dotdot):
prefix_dir = prefix_dir[len(dotdot) :]
notebook_dir = split(notebook_dir, sep)[0]
# Do not add a path separator when notebook_dir is '/'
if notebook_dir and not notebook_dir.endswith(sep):
notebook_dir = notebook_dir + sep
notebook_dir = notebook_dir + prefix_dir
if notebook_dir and not notebook_dir.endswith(sep):
notebook_dir = notebook_dir + sep
full = notebook_dir + notebook_file_name
if suffix:
full = full + suffix
return full + ext
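# Inverse sketch: full_path puts the suffix and extension back.
assert full_path("notebook", {"extension": ".py", "suffix": ".light"}) == "notebook.light.py"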
def update_metadata_from_rmd_options(name, value, metadata, use_runtools=False):
"""Map the R Markdown cell visibility options to the Jupyter ones"""
if use_runtools:
for rmd_option, jupyter_options in _RMARKDOWN_TO_RUNTOOLS_OPTION_MAP:
if name == rmd_option[0] and value == rmd_option[1]:
for opt_name, opt_value in jupyter_options:
metadata[opt_name] = opt_value
return True
else:
for rmd_option, tag in _RMARKDOWN_TO_JUPYTER_BOOK_MAP:
if name == rmd_option[0] and value == rmd_option[1]:
metadata.setdefault("tags", []).append(tag)
return True
return False
def find_base_path_and_format(main_path, formats):
"""Return the base path and the format corresponding to the given path"""
for fmt in formats:
try:
return base_path(main_path, fmt), fmt
except InconsistentPath:
continue
raise InconsistentPath(
"Path '{}' matches none of the export formats. "
"Please make sure that jupytext.formats covers the current file "
"(e.g. add '{}' to the export formats)".format(
main_path, os.path.splitext(main_path)[1][1:]
)
)
def paired_paths(main_path, fmt, formats):
"""Return the list of paired notebooks, given main path, and the list of formats"""
if not formats:
return [(main_path, {"extension": os.path.splitext(main_path)[1]})]
formats = long_form_multiple_formats(formats)
# Is there a format that matches the main path?
base = base_path(main_path, fmt, formats)
paths = [full_path(base, f) for f in formats]
if main_path not in paths:
raise InconsistentPath(
"Paired paths '{}' do not include the current notebook path '{}'. "
"Current format is '{}', and paired formats are '{}'.".format(
"','".join(paths),
main_path,
short_form_one_format(fmt),
short_form_multiple_formats(formats),
)
)
if len(paths) > len(set(paths)):
raise InconsistentPath(
"Duplicate paired paths for this notebook. Please fix jupytext.formats."
)
return list(zip(paths, formats))
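# Usage sketch (formats string assumed): an .ipynb notebook paired with a
# percent script.
_pairs = paired_paths("nb.ipynb", "ipynb", "ipynb,py:percent")
assert [path for path, _ in _pairs] == ["nb.ipynb", "nb.py"]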
"""Functions to read or write paired notebooks"""
from collections import namedtuple
import jupytext
from .combine import combine_inputs_with_outputs
from .compare import compare
from .formats import (
check_file_version,
long_form_multiple_formats,
long_form_one_format,
)
from .paired_paths import find_base_path_and_format, full_path, paired_paths
NotebookFile = namedtuple("notebook_file", "path fmt timestamp")
@others
@language python
@tabwidth -4
class PairedFilesDiffer(ValueError):
"""An error when the two representations of a paired notebook differ"""
def write_pair(path, formats, write_one_file):
"""
Call the function 'write_one_file' on each of the paired path/formats
"""
formats = long_form_multiple_formats(formats)
base, _ = find_base_path_and_format(path, formats)
# Save as ipynb first
return_value = None
value = None
for fmt in formats[::-1]:
if fmt["extension"] != ".ipynb":
continue
alt_path = full_path(base, fmt)
value = write_one_file(alt_path, fmt)
if alt_path == path:
return_value = value
# And then to the other formats, in reverse order so that
# the first format is the most recent
for fmt in formats[::-1]:
if fmt["extension"] == ".ipynb":
continue
alt_path = full_path(base, fmt)
value = write_one_file(alt_path, fmt)
if alt_path == path:
return_value = value
# Update modified timestamp to match that of the pair #207
if isinstance(return_value, dict) and "last_modified" in return_value:
return_value["last_modified"] = value["last_modified"]
return return_value
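# Callback-contract sketch (the callback is hypothetical): write_one_file
# receives each paired (path, format), with the .ipynb representation written
# first.
def _demo_write(alt_path, fmt):
    return {"path": alt_path, "last_modified": None}
assert write_pair("nb.ipynb", "ipynb,py:light", _demo_write)["path"] == "nb.ipynb"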
def latest_inputs_and_outputs(
path, fmt, formats, get_timestamp, contents_manager_mode=False
):
"""Given a notebook path, its format and paired formats, and a function that
returns the timestamp for each (or None if the file does not exist), return
the most recent notebook for the inputs and outputs, respectively"""
timestamp_inputs = None
timestamp_outputs = None
inputs_path = None
outputs_path = None
input_fmt = None
output_fmt = None
fmt = long_form_one_format(fmt)
formats = long_form_multiple_formats(formats)
for alt_path, alt_fmt in paired_paths(path, fmt, formats):
        # In the contents manager, we don't read another text file if the current notebook is already in text mode
if (
contents_manager_mode
and alt_fmt["extension"] != ".ipynb"
and fmt["extension"] != ".ipynb"
):
if any(
alt_fmt.get(key) != fmt.get(key)
for key in ["extension", "suffix", "prefix"]
):
continue
timestamp = get_timestamp(alt_path)
if timestamp is None:
continue
if alt_fmt["extension"] == ".ipynb":
if timestamp_outputs is None or timestamp > timestamp_outputs:
timestamp_outputs = timestamp
outputs_path, output_fmt = alt_path, alt_fmt
elif timestamp_inputs is None or timestamp > timestamp_inputs:
timestamp_inputs = timestamp
inputs_path, input_fmt = alt_path, alt_fmt
if timestamp_inputs is None or (
not contents_manager_mode
and timestamp_outputs
and timestamp_outputs > timestamp_inputs
):
timestamp_inputs = timestamp_outputs
inputs_path, input_fmt = outputs_path, output_fmt
return (
NotebookFile(inputs_path, input_fmt, timestamp_inputs),
NotebookFile(outputs_path, output_fmt, timestamp_outputs),
)
def read_pair(inputs, outputs, read_one_file, must_match=False):
"""Read a notebook given its inputs and outputs path and formats"""
if not outputs.path or outputs.path == inputs.path:
return read_one_file(inputs.path, inputs.fmt)
notebook = read_one_file(inputs.path, inputs.fmt)
check_file_version(notebook, inputs.path, outputs.path)
notebook_with_outputs = read_one_file(outputs.path, outputs.fmt)
if must_match:
in_text = jupytext.writes(notebook, inputs.fmt)
out_text = jupytext.writes(notebook_with_outputs, inputs.fmt)
diff = compare(out_text, in_text, outputs.path, inputs.path, return_diff=True)
if diff:
raise PairedFilesDiffer(diff)
notebook = combine_inputs_with_outputs(
notebook, notebook_with_outputs, fmt=inputs.fmt
)
return notebook
"""Jupyter notebook to Markdown and back, using Pandoc"""
import os
import subprocess
import tempfile
# Copy nbformat reads and writes to avoid them being patched in the contents manager!!
from nbformat import reads as ipynb_reads
from nbformat import writes as ipynb_writes
from packaging.version import parse
@others
@language python
@tabwidth -4
class PandocError(OSError):
"""An error related to Pandoc"""
def pandoc(args, filein=None, fileout=None):
"""Execute pandoc with the given arguments"""
cmd = ["pandoc"]
if filein:
cmd.append(filein)
if fileout:
cmd.append("-o")
cmd.append(fileout)
cmd.extend(args.split())
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode:
        raise PandocError(
            f"pandoc exited with return code {proc.returncode}\n{err.decode('utf-8')}"
        )
return out.decode("utf-8")
class ParsingContext:
"""
Class for determining where to split rmd options
"""
@others
def is_pandoc_available(min_version="2.7.2", max_version=None):
"""Is Pandoc>=2.7.2 available?"""
try:
raise_if_pandoc_is_not_available(
min_version=min_version, max_version=max_version
)
return True
except PandocError:
return False
def raise_if_pandoc_is_not_available(min_version="2.7.2", max_version=None):
"""Raise with an informative error message if pandoc is not available"""
version = pandoc_version()
if version == "N/A":
raise PandocError(
f"The Pandoc Markdown format requires 'pandoc>={min_version}', "
"but pandoc was not found"
)
if parse(version) < parse(min_version):
raise PandocError(
f"The Pandoc Markdown format requires 'pandoc>={min_version}', "
f"but pandoc version {version} was found"
)
if max_version and parse(version) > parse(max_version):
raise PandocError(
f"The Pandoc Markdown format requires 'pandoc<={max_version}', "
f"but pandoc version {version} was found"
)
return version
def pandoc_version():
"""Pandoc's version number"""
try:
return pandoc("--version").splitlines()[0].split()[1]
except OSError:
return "N/A"
def md_to_notebook(text):
"""Convert a Markdown text to a Jupyter notebook, using Pandoc"""
raise_if_pandoc_is_not_available()
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(text.encode("utf-8"))
tmp_file.close()
if parse(pandoc_version()) < parse("2.11.2"):
pandoc_args = "--from markdown --to ipynb -s --atx-headers --wrap=preserve --preserve-tabs"
else:
pandoc_args = "--from markdown --to ipynb -s --markdown-headings=atx --wrap=preserve --preserve-tabs"
pandoc(
pandoc_args,
tmp_file.name,
tmp_file.name,
)
with open(tmp_file.name, encoding="utf-8") as opened_file:
notebook = ipynb_reads(opened_file.read(), as_version=4)
os.unlink(tmp_file.name)
return notebook
def notebook_to_md(notebook):
"""Convert a notebook to its Markdown representation, using Pandoc"""
raise_if_pandoc_is_not_available()
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(ipynb_writes(notebook).encode("utf-8"))
tmp_file.close()
if parse(pandoc_version()) < parse("2.11.2"):
pandoc_args = "--from ipynb --to markdown -s --atx-headers --wrap=preserve --preserve-tabs"
else:
pandoc_args = "--from ipynb --to markdown -s --markdown-headings=atx --wrap=preserve --preserve-tabs"
pandoc(
pandoc_args,
tmp_file.name,
tmp_file.name,
)
with open(tmp_file.name, encoding="utf-8") as opened_file:
text = opened_file.read()
os.unlink(tmp_file.name)
return "\n".join(text.splitlines())
"""Determine how many blank lines should be inserted between two cells"""
from .stringparser import StringParser
@others
@language python
@tabwidth -4
def next_instruction_is_function_or_class(lines):
"""Is the first non-empty, non-commented line of the cell either a function or a class?"""
parser = StringParser("python")
for i, line in enumerate(lines):
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
if not line.strip(): # empty line
if i > 0 and not lines[i - 1].strip():
return False
continue
if line.startswith("def ") or line.startswith("class "):
return True
if line.startswith(("#", "@", " ", ")")):
continue
return False
return False
def cell_ends_with_function_or_class(lines):
"""Does the last line of the cell belong to an indented code?"""
non_quoted_lines = []
parser = StringParser("python")
for line in lines:
if not parser.is_quoted():
non_quoted_lines.append(line)
parser.read_line(line)
# find the first line, starting from the bottom, that is not indented
lines = non_quoted_lines[::-1]
for i, line in enumerate(lines):
if not line.strip():
# two blank lines? we won't need to insert more blank lines below this cell
if i > 0 and not lines[i - 1].strip():
return False
continue
if line.startswith(("#", " ", ")")):
continue
if line.startswith("def ") or line.startswith("class "):
return True
return False
return False
def cell_ends_with_code(lines):
"""Is the last line of the cell a line with code?"""
if not lines:
return False
if not lines[-1].strip():
return False
if lines[-1].startswith("#"):
return False
return True
def cell_has_code(lines):
"""Is there any code in this cell?"""
for i, line in enumerate(lines):
stripped_line = line.strip()
if stripped_line.startswith("#"):
continue
# Two consecutive blank lines?
if not stripped_line:
if i > 0 and not lines[i - 1].strip():
return False
continue
return True
return False
parenthesis_count = 0
curly_bracket_count = 0
square_bracket_count = 0
in_single_quote = False
in_double_quote = False
def __init__(self, line):
self.line = line
def pep8_lines_between_cells(prev_lines, next_lines, ext):
"""How many blank lines should be added between the two python paragraphs to make them pep8?"""
if not next_lines:
return 1
if not prev_lines:
return 0
if ext != ".py":
return 1
if cell_ends_with_function_or_class(prev_lines):
return 2 if cell_has_code(next_lines) else 1
if cell_ends_with_code(prev_lines) and next_instruction_is_function_or_class(
next_lines
):
return 2
return 1
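# Illustrative spacing checks (cell contents assumed): PEP8 asks for two blank
# lines around top-level definitions, one otherwise.
assert pep8_lines_between_cells(["x = 1"], ["def f():", "    pass"], ".py") == 2
assert pep8_lines_between_cells(["x = 1"], ["y = 2"], ".py") == 1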
"""Jupyter notebook to Quarto Markdown and back, using 'quarto convert'"""
import os
import subprocess
import tempfile
# Copy nbformat reads and writes to avoid them being patched in the contents manager!!
from nbformat import reads as ipynb_reads
from nbformat import writes as ipynb_writes
from packaging.version import parse
QUARTO_MIN_VERSION = "0.2.134"
@others
@language python
@tabwidth -4
class QuartoError(OSError):
"""An error related to Quarto"""
def quarto(args, filein=None):
"""Execute quarto with the given arguments"""
cmd = ["quarto"] + args.split()
if filein:
cmd.append(filein)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode:
raise QuartoError(
f"{' '.join(cmd)} exited with return code {proc.returncode}\n{err.decode('utf-8')}"
)
return out.decode("utf-8")
def is_quarto_available(min_version=QUARTO_MIN_VERSION):
"""Is Quarto available?"""
try:
raise_if_quarto_is_not_available(min_version=min_version)
return True
except QuartoError:
return False
def raise_if_quarto_is_not_available(min_version=QUARTO_MIN_VERSION):
"""Raise with an informative error message if quarto is not available"""
version = quarto_version()
if version == "N/A":
raise QuartoError(
f"The Quarto Markdown format requires 'quarto>={min_version}', "
"but quarto was not found"
)
    if parse(version) < parse(min_version):
        raise QuartoError(
            f"The Quarto Markdown format requires 'quarto>={min_version}', "
            f"but quarto version {version} was found"
        )
return version
def quarto_version():
"""Quarto's version number"""
try:
return quarto("--version").strip()
except OSError:
return "N/A"
def qmd_to_notebook(text):
"""Convert a Quarto Markdown notebook to a Jupyter notebook"""
raise_if_quarto_is_not_available()
tmp_qmd_file = tempfile.NamedTemporaryFile(delete=False, suffix=".qmd")
tmp_qmd_file.write(text.encode("utf-8"))
tmp_qmd_file.close()
quarto("convert --log-level warning", tmp_qmd_file.name)
tmp_ipynb_file_name = tmp_qmd_file.name[:-4] + ".ipynb"
with open(tmp_ipynb_file_name, encoding="utf-8") as ipynb_file:
notebook = ipynb_reads(ipynb_file.read(), as_version=4)
os.unlink(tmp_qmd_file.name)
os.unlink(tmp_ipynb_file_name)
return notebook
def notebook_to_qmd(notebook):
"""Convert a Jupyter notebook to its Quarto Markdown representation"""
raise_if_quarto_is_not_available()
tmp_ipynb_file = tempfile.NamedTemporaryFile(delete=False, suffix=".ipynb")
tmp_ipynb_file.write(ipynb_writes(notebook).encode("utf-8"))
tmp_ipynb_file.close()
quarto("convert --log-level warning", tmp_ipynb_file.name)
tmp_qmd_file_name = tmp_ipynb_file.name[:-6] + ".qmd"
with open(tmp_qmd_file_name, encoding="utf-8") as qmd_file:
text = qmd_file.read()
os.unlink(tmp_ipynb_file.name)
os.unlink(tmp_qmd_file_name)
return "\n".join(text.splitlines())
"""Raise the given error at evaluation time"""
@others
@language python
@tabwidth -4
def in_global_expression(self):
"""Currently inside an expression"""
return (
self.parenthesis_count == 0
and self.curly_bracket_count == 0
and self.square_bracket_count == 0
and not self.in_single_quote
and not self.in_double_quote
)
def reraise(error):
"""Return a function that raises the given error when evaluated"""
def local_function(*args, **kwargs):
raise error
return local_function
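# Usage sketch: defer an error, e.g. for an optional reader that is not installed.
_missing_reader = reraise(ImportError("illustrative error"))
try:
    _missing_reader()
except ImportError:
    pass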
"""A simple file parser that can tell whether the first character of a line
is quoted or not"""
from .languages import _COMMENT
@others
@language python
@tabwidth -4
class StringParser:
"""A simple file parser that can tell whether the first character of a line
is quoted or not"""
@others
single = None
triple = None
triple_start = None
def __init__(self, language):
self.ignore = language is None
self.python = language != "R"
self.comment = _COMMENT.get(language)
def is_quoted(self):
"""Is the next line quoted?"""
if self.ignore:
return False
return self.single or self.triple
def read_line(self, line):
"""Read a new line"""
if self.ignore:
return
# Do not search for quotes when the line is commented out (and not quoted)
if (
not self.is_quoted()
and self.comment is not None
and line.lstrip().startswith(self.comment)
):
return
self.triple_start = -1
for i, char in enumerate(line):
if (
self.single is None
and self.triple is None
and self.comment
and self.comment.startswith(char)
and line[i:].startswith(self.comment)
):
break
if char not in ['"', "'"]:
continue
# Is the char escaped?
if line[i - 1 : i] == "\\":
continue
if self.single == char:
self.single = None
continue
if self.single is not None:
continue
if not self.python:
continue
if line[i - 2 : i + 1] == 3 * char and i >= self.triple_start + 3:
# End of a triple quote
if self.triple == char:
self.triple = None
self.triple_start = i
continue
# Are we looking for a different triple quote?
if self.triple is not None:
continue
# Triple quote starting
self.triple = char
self.triple_start = i
continue
# Inside a multiline quote
if self.triple is not None:
continue
self.single = char
# Line ended
if self.python:
self.single = None
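# Behaviour sketch (lines assumed): the parser tracks multi-line string state.
_parser = StringParser("python")
_parser.read_line('s = """first line of a docstring')
assert _parser.is_quoted()
_parser.read_line('last line"""')
assert not _parser.is_quoted()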
"""Jupytext's version number"""
__version__ = "1.16.4"
@language python
@tabwidth -4
@language python
@tabwidth -4
""" Main for Jupytext_config
Call with (e.g.)::
python -m jupytext_config list-default-viewer
"""
import sys
from .jupytext_config import main
if __name__ == "__main__":
sys.exit(main())
@language python
@tabwidth -4
def count_special_chars(self, char, prev_char):
"""Update parenthesis counters"""
if char == "(":
self.parenthesis_count += 1
elif char == ")":
self.parenthesis_count -= 1
if self.parenthesis_count < 0:
raise RMarkdownOptionParsingError(
'Option line "{}" has too many '
"closing parentheses".format(self.line)
)
elif char == "{":
self.curly_bracket_count += 1
elif char == "}":
self.curly_bracket_count -= 1
if self.curly_bracket_count < 0:
raise RMarkdownOptionParsingError(
'Option line "{}" has too many '
"closing curly brackets".format(self.line)
)
elif char == "[":
self.square_bracket_count += 1
elif char == "]":
self.square_bracket_count -= 1
if self.square_bracket_count < 0:
raise RMarkdownOptionParsingError(
'Option line "{}" has too many '
"closing square brackets".format(self.line)
)
elif char == "'" and prev_char != "\\" and not self.in_double_quote:
self.in_single_quote = not self.in_single_quote
elif char == '"' and prev_char != "\\" and not self.in_single_quote:
self.in_double_quote = not self.in_double_quote
"""
the code for
jupytext-config set-default-viewer
and related subcommands
"""
import sys
from argparse import ArgumentParser
from pathlib import Path
import jupyter_core.paths as jupyter_core_paths
from .labconfig import LabConfig
@others
@language python
@tabwidth -4
class SubCommand:
"""
a subcommand for jupytext-config
"""
@others
def __init__(self, name, help):
self.name = name
self.help = help
def main(self, args):
"""
return 0 if all goes well
"""
raise NotImplementedError() # pragma: no cover
class ListDefaultViewer(SubCommand):
@others
def __init__(self):
super().__init__("list-default-viewer", "Display current settings in labconfig")
def main(self, args):
LabConfig(settings_file=args.settings_file).read().list_default_viewer()
return 0
def fill_parser(self, subparser):
pass
class SetDefaultViewer(SubCommand):
@others
def __init__(self):
super().__init__("set-default-viewer", "Set default viewers for JupyterLab")
def parse_rmd_options(line):
"""
Given an R Markdown option line, return the list of (name, value) pairs
:param line: the option line, e.g. 'label, echo=FALSE'
:return: a list of (name, value) string pairs
"""
parsing_context = ParsingContext(line)
result = []
prev_char = ""
name = ""
value = ""
for char in "," + line + ",":
if parsing_context.in_global_expression():
if char == ",":
if name != "" or value != "":
if result and name == "":
raise RMarkdownOptionParsingError(
'Option line "{}" has no name for '
"option value {}".format(line, value)
)
result.append((name.strip(), value.strip()))
name = ""
value = ""
elif char == "=":
if name == "":
name = value
value = ""
else:
value += char
else:
parsing_context.count_special_chars(char, prev_char)
value += char
else:
parsing_context.count_special_chars(char, prev_char)
value += char
prev_char = char
if not parsing_context.in_global_expression():
raise RMarkdownOptionParsingError(
f'Option line "{line}" is not properly terminated'
)
return result
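# Editor's sketch (not part of the sources): parse_rmd_options on a typical
# chunk header. The leading unnamed value (the chunk label) is paired with an
# empty name, and values are still raw strings at this stage. The import path
# follows this package's layout (cell_metadata.py).
from jupytext.cell_metadata import parse_rmd_options
assert parse_rmd_options("label, echo=FALSE") == [("", "label"), ("echo", "FALSE")]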
def main(self, args):
LabConfig(settings_file=args.settings_file).read().set_default_viewers(
args.doctype
).write()
return 0
def fill_parser(self, subparser):
subparser.add_argument(
"doctype",
nargs="*",
help=f"the document types to be associated with the notebook editor; "
f"defaults to {' '.join(LabConfig.DOCTYPES)}",
)
class UnsetDefaultViewer(SubCommand):
@others
def __init__(self):
super().__init__("unset-default-viewer", "Unset default viewers for JupyterLab")
def main(self, args):
LabConfig(settings_file=args.settings_file).read().unset_default_viewers(
args.doctype
).write()
return 0
def fill_parser(self, subparser):
subparser.add_argument(
"doctype",
nargs="*",
help=f"the document types for which the default viewer will be unset; "
f"defaults to {' '.join(LabConfig.DOCTYPES)}",
)
# create the subcommands
SUBCOMMANDS = [
ListDefaultViewer(),
SetDefaultViewer(),
UnsetDefaultViewer(),
]
def main():
parser = ArgumentParser()
parser.add_argument(
"--settings-file",
default=Path(jupyter_core_paths.jupyter_config_dir())
/ "labconfig"
/ "default_setting_overrides.json",
)
subparsers = parser.add_subparsers(required=True)
for subcommand in SUBCOMMANDS:
subparser = subparsers.add_parser(subcommand.name, help=subcommand.help)
subparser.set_defaults(subcommand=subcommand)
subcommand.fill_parser(subparser)
args = parser.parse_args(sys.argv[1:] or ["--help"])
return args.subcommand.main(args)
"""
helper to inspect / initialize jupyterlab labconfig settings
that are required to open jupytext notebooks in jupyterlab by default
when these settings are not present, a double click on a jupytext
notebook will cause jupyterlab to open it in an editor, i.e. as a text file
"""
import json
import logging
from pathlib import Path
@others
@language python
@tabwidth -4
class LabConfig:
@others
DOCTYPES = [
"python",
"markdown",
"myst",
"r-markdown",
"quarto",
"julia",
"r",
]
def __init__(self, *, settings_file, logger=None):
self.settings_file = Path(settings_file)
self.logger = logger or logging.getLogger(__name__)
self.config = {}
def rmd_options_to_metadata(options, use_runtools=False):
"""Parse rmd options and return a metadata dictionary"""
options = re.split(r"\s|,", options, maxsplit=1)
# Special case Wolfram Language, which sadly has a space in the language
# name.
if options[0:2] == ["wolfram", "language"]:
options[0:2] = ["wolfram language"]
if len(options) == 1:
language = options[0]
chunk_options = []
else:
language, others = options
language = language.rstrip(" ,")
others = others.lstrip(" ,")
chunk_options = parse_rmd_options(others)
language = "R" if language == "r" else language
metadata = {}
for i, opt in enumerate(chunk_options):
name, value = opt
if i == 0 and name == "":
metadata["name"] = value
continue
if update_metadata_from_rmd_options(
name, value, metadata, use_runtools=use_runtools
):
continue
try:
metadata[name] = _py_logical_values(value)
continue
except RLogicalValueError:
metadata[name] = value
for name in metadata:
try_eval_metadata(metadata, name)
if "eval" in metadata and not is_active(".Rmd", metadata):
del metadata["eval"]
return metadata.get("language") or language, metadata
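# Editor's sketch (not part of the sources): rmd_options_to_metadata returns
# a (language, metadata) pair; plain values are evaluated to Python objects
# by try_eval_metadata. This assumes fig.width is not one of the specially
# mapped runtools options.
from jupytext.cell_metadata import rmd_options_to_metadata
language, metadata = rmd_options_to_metadata("python fig.width=8")
assert language == "python"
assert metadata == {"fig.width": 8}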
def read(self):
"""
read the labconfig settings file
"""
if self.settings_file.exists():
with self.settings_file.open() as fid:
self.config = json.load(fid)
else:
self.logger.info(f"Could not read from {self.settings_file} (not found)")
return self
def get_viewers(self):
return self.config.setdefault(
"@jupyterlab/docmanager-extension:plugin", {}
).setdefault("defaultViewers", {})
def list_default_viewer(self):
"""
list the current labconfig settings
"""
self.logger.debug(
f"Current @jupyterlab/docmanager-extension:plugin in {self.settings_file}"
)
for key, value in self.get_viewers().items():
print(f"{key}: {value}")
def set_default_viewers(self, doctypes=None):
if not doctypes:
doctypes = self.DOCTYPES
for doctype in doctypes:
self.set_default_viewer(doctype)
return self
def set_default_viewer(self, doctype):
viewers = self.get_viewers()
if doctype not in viewers:
viewers[doctype] = "Jupytext Notebook"
def unset_default_viewers(self, doctypes=None):
if not doctypes:
doctypes = self.DOCTYPES
for doctype in doctypes:
self.unset_default_viewer(doctype)
return self
def unset_default_viewer(self, doctype):
viewers = self.get_viewers()
if doctype in viewers:
del viewers[doctype]
def write(self):
"""
write the labconfig settings file
"""
self.settings_file.parent.mkdir(parents=True, exist_ok=True)
with self.settings_file.open("w") as fid:
json.dump(self.config, fid, indent=2)
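# Editor's sketch (not part of the sources): the LabConfig API is fluent.
# The settings path below is only illustrative; jupytext-config computes it
# from jupyter_core.paths.jupyter_config_dir(). The import path assumes the
# package layout shown above (jupytext_config/labconfig.py).
from pathlib import Path
from jupytext_config.labconfig import LabConfig
settings = Path.home() / ".jupyter" / "labconfig" / "default_setting_overrides.json"
LabConfig(settings_file=settings).read().set_default_viewers(["python", "markdown"]).write()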
"""`jupytext` as a command line tool"""
import argparse
import glob
import json
import os
import re
import shlex
import subprocess
import sys
import warnings
from copy import copy
from tempfile import NamedTemporaryFile
from .combine import combine_inputs_with_outputs
from .compare import NotebookDifference, compare, test_round_trip_conversion
from .config import load_jupytext_config, notebook_formats
from .formats import (
_BINARY_FORMAT_OPTIONS,
_VALID_FORMAT_OPTIONS,
JUPYTEXT_FORMATS,
check_auto_ext,
check_file_version,
long_form_multiple_formats,
long_form_one_format,
short_form_one_format,
)
from .header import recursive_update
from .jupytext import create_prefix_dir, read, reads, write, writes
from .kernels import find_kernel_specs, get_kernel_spec, kernelspec_from_language
from .languages import _SCRIPT_EXTENSIONS
from .paired_paths import (
InconsistentPath,
base_path,
find_base_path_and_format,
full_path,
paired_paths,
)
from .pairs import latest_inputs_and_outputs, read_pair, write_pair
from .version import __version__
@others
@language python
@tabwidth -4
def system(*args, **kwargs):
"""Execute the given bash command"""
kwargs.setdefault("stdout", subprocess.PIPE)
proc = subprocess.Popen(args, **kwargs)
out, _ = proc.communicate()
if proc.returncode:
raise SystemExit(proc.returncode)
return out.decode("utf-8")
def try_eval_metadata(metadata, name):
"""Evaluate the metadata to a python object, if possible"""
value = metadata[name]
if not isinstance(value, str):
return
if (value.startswith('"') and value.endswith('"')) or (
value.startswith("'") and value.endswith("'")
):
metadata[name] = value[1:-1]
return
if value.startswith("c(") and value.endswith(")"):
value = "[" + value[2:-1] + "]"
elif value.startswith("list(") and value.endswith(")"):
value = "[" + value[5:-1] + "]"
try:
metadata[name] = ast.literal_eval(value)
except (SyntaxError, ValueError):
if name != "name":
metadata[name] = "#R_CODE#" + value
return
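# Editor's sketch (not part of the sources): try_eval_metadata mutates the
# metadata dict in place, unquoting strings and turning R vectors such as
# c(...) into Python lists.
from jupytext.cell_metadata import try_eval_metadata
metadata = {"fig.width": "8", "caption": "'A plot'", "breaks": "c(1, 2, 3)"}
for name in metadata:
    try_eval_metadata(metadata, name)
assert metadata == {"fig.width": 8, "caption": "A plot", "breaks": [1, 2, 3]}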
def tool_version(tool):
try:
args = tool.split(" ")
args.append("--version")
return system(*args)
except (OSError, SystemExit): # pragma: no cover
return None
def str2bool(value):
"""Parse Yes/No/Default string
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse"""
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
if value.lower() in ("no", "false", "f", "n", "0"):
return False
if value.lower() in ("d", "default", ""):
return None
raise argparse.ArgumentTypeError("Expected: (Y)es/(T)rue/(N)o/(F)alse/(D)efault")
def parse_jupytext_args(args=None):
"""Command line parser for jupytext"""
parser = argparse.ArgumentParser(
description="Jupyter Notebooks as Markdown Documents, Julia, Python or R Scripts",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Input
parser.add_argument(
"notebooks",
help="One or more notebook(s). "
"Notebook is read from stdin when this argument is empty.",
nargs="*",
)
parser.add_argument(
"--from",
dest="input_format",
help="Jupytext format for the input(s). Inferred from the "
"file extension and content when missing.",
)
# Destination format & act on metadata
parser.add_argument(
"--to",
dest="output_format",
help=(
"The destination format: 'ipynb', 'markdown' or 'script', or a file extension: "
"'md', 'Rmd', 'jl', 'py', 'R', ..., 'auto' (script extension matching the notebook language), "
"or a combination of an extension and a format name, e.g. {} ".format(
", ".join(
{
f"md:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".md"
}
)
)
+ " or {}. ".format(
", ".join(
{
f"py:{fmt.format_name}"
for fmt in JUPYTEXT_FORMATS
if fmt.extension == ".py"
}
)
)
+ "The default format for scripts is the 'light' format, "
"which uses few cell markers (none when possible). "
"Alternatively, a format compatible with many editors is the "
"'percent' format, which uses '# %%%%' as cell markers. "
"The main formats (markdown, light, percent) preserve "
"notebooks and text documents in a roundtrip. Use the "
"--test and and --test-strict commands to test the roundtrip on your files. "
"Read more about the available formats at "
"https://jupytext.readthedocs.io/en/latest/formats.html"
),
)
# Destination file
parser.add_argument(
"-o",
"--output",
help="Destination file. Defaults to the original file, "
"with prefix/suffix/extension changed according to "
"the destination format. "
"Use '-' to print the notebook on stdout.",
)
parser.add_argument(
"--update",
action="store_true",
help="Preserve the output cells when the destination "
"notebook is an .ipynb file that already exists",
)
parser.add_argument(
"--set-formats",
type=str,
help="Turn the notebook or text document to one or more alternative representations "
"with e.g. '--set-formats ipynb,py:light'. "
"The --set-formats option also triggers the creation/update of all paired files",
)
# Action: convert(default)/version/list paired paths/sync/apply/test
action = parser.add_mutually_exclusive_group()
action.add_argument(
"--sync",
"-s",
help="Synchronize the content of the paired representations of "
"the given notebook. Input cells are taken from the file that "
"was last modified, and outputs are read from the ipynb file, "
"if present.",
action="store_true",
)
action.add_argument(
"--paired-paths",
"-p",
help="List the locations of the alternative representations for this notebook.",
action="store_true",
)
parser.add_argument(
"--format-options",
"--opt",
action="append",
help="Set format options with e.g. "
"'--opt comment_magics=true' or '--opt notebook_metadata_filter=-kernelspec'.",
)
parser.add_argument(
"--update-metadata",
default={},
type=json.loads,
help="Update the notebook metadata with the desired dictionary. "
"Argument must be given in JSON format. For instance, if you "
"want to activate a pairing in the generated file, use e.g. "
"""--update-metadata '{"jupytext":{"formats":"ipynb,py:light"}}' """
"See also the --opt and --set-formats options for other ways "
"to operate on the Jupytext metadata.",
)
parser.add_argument(
"--use-source-timestamp",
help="Set the modification timestamp of the output file(s) equal"
"to that of the source file, and keep the source file and "
"its timestamp unchanged.",
action="store_true",
)
parser.add_argument(
"--warn-only",
"-w",
action="store_true",
help="Only issue a warning and continue processing other notebooks "
"when the conversion of a given notebook fails",
)
action.add_argument(
"--test",
action="store_true",
help="Test that the notebook is stable under a round trip conversion, "
"up to the expected changes",
)
action.add_argument(
"--test-strict",
action="store_true",
help="Test that the notebook is strictly stable under a round trip conversion",
)
parser.add_argument(
"--stop",
"-x",
dest="stop_on_first_error",
action="store_true",
help="In --test mode, stop on first round trip conversion error, and report stack traceback",
)
# Pipe notebook inputs into other commands
parser.add_argument(
"--pipe",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and read the notebook back. For instance, reformat "
"your notebook with: "
"'jupytext notebook.ipynb --pipe black' "
"If you want to reformat it and sync the paired representation, execute: "
"'jupytext notebook.ipynb --sync --pipe black' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --pipe 'black {}'",
)
parser.add_argument(
"--diff",
"-d",
action="store_true",
help="Show the differences between (the inputs) of two notebooks",
)
parser.add_argument(
"--diff-format",
help="The text format used to show differences in --diff",
)
parser.add_argument(
"--check",
action="append",
help="Pipe the text representation (in format --pipe-fmt) of the notebook into "
"another program, and test that the returned value is non zero. For "
"instance, test that your notebook is pep8 compliant with: "
"'jupytext notebook.ipynb --check flake8' "
"or run pytest on your notebook with: "
"'jupytext notebook.ipynb --check pytest' "
"In case the program that you want to execute does not accept pipes, use {} "
"as a placeholder for a temporary file name into which jupytext will "
"write the text representation of the notebook, e.g.: "
"jupytext notebook.ipynb --check 'pytest {}'",
)
parser.add_argument(
"--pipe-fmt",
default="auto:percent",
help="The format in which the notebook should be piped to other programs, "
"when using the --pipe and/or --check commands.",
)
# Execute the notebook
parser.add_argument(
"--set-kernel",
"-k",
type=str,
help="Set the kernel with the given name on the notebook. "
"Use '--set-kernel -' to set a kernel matching the current "
"environment on Python notebooks, and matching the notebook "
"language otherwise (get the list of available kernels with "
"'jupyter kernelspec list')",
)
parser.add_argument(
"--execute",
action="store_true",
help="Execute the notebook with the given kernel. In the "
"--pre-commit-mode, the notebook is executed only if a code "
"cell changed, or if some execution outputs are missing "
"or not ordered.",
)
parser.add_argument(
"--run-path",
type=str,
help="Execute the notebook at the given path (defaults to the notebook parent directory)",
)
parser.add_argument(
"--quiet",
"-q",
action="store_true",
help="Quiet mode: do not comment about files being updated or created",
)
parser.add_argument(
"--show-changes",
action="store_true",
help="Display the diff for each output file",
)
action.add_argument(
"--version",
"-v",
action="store_true",
help="Show jupytext's version number and exit",
)
parser.add_argument(
"--pre-commit",
action="store_true",
help="Ignore the notebook argument, and instead apply Jupytext "
"on the notebooks found in the git index, which have an "
"extension that matches the (optional) --from argument.",
)
parser.add_argument(
"--pre-commit-mode",
action="store_true",
help="This is a mode that is compatible with the pre-commit framework. "
"In this mode, --sync won't use timestamp but instead will "
"determines the source notebook as the element of the pair "
"that is added to the git index. An alert is raised if multiple inconsistent representations are "
"in the index. It also raises an alert after updating the paired files or outputs if those "
"files need to be added to the index. Finally, filepaths that aren't in the source format "
"you are trying to convert from are ignored.",
)
return parser.parse_args(args)
def notebooks_in_git_index(fmt):
"""Return the list of modified and deleted ipynb files in the git index that match the given format"""
git_status = system("git", "status", "--porcelain")
re_modified = re.compile(r"^[AM]+\s+(?P<name>.*)", re.MULTILINE)
modified_files_in_git_index = re_modified.findall(git_status)
files = []
for nb_file in modified_files_in_git_index:
if nb_file.startswith('"') and nb_file.endswith('"'):
nb_file = nb_file[1:-1]
try:
base_path(nb_file, fmt)
files.append(nb_file)
except InconsistentPath:
continue
return files
def is_untracked(filepath):
"""Check whether a file was created or modified and needs to be added to the git index"""
if not filepath:
return False
output = system("git", "ls-files", filepath).strip()
if output == "":
return True
output = system("git", "diff", filepath).strip()
if output != "":
return True
return False
def print_paired_paths(nb_file, fmt):
"""Display the paired paths for this notebook"""
notebook = read(nb_file, fmt=fmt)
formats = notebook.metadata.get("jupytext", {}).get("formats")
if formats:
for path, _ in paired_paths(nb_file, fmt, formats):
if path != nb_file:
sys.stdout.write(path + "\n")
def set_format_options(fmt, format_options):
"""Apply the desired format options to the format description fmt"""
if not format_options:
return
for opt in format_options:
try:
key, value = opt.split("=")
except ValueError as err:
raise ValueError(
"Format options are expected to be of the form key=value, not '{}'".format(
opt
)
) from err
if key not in _VALID_FORMAT_OPTIONS:
raise ValueError(
"'{}' is not a valid format option. Expected one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)
)
)
if key in _BINARY_FORMAT_OPTIONS:
value = str2bool(value)
fmt[key] = value
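# Editor's sketch (not part of the sources): format options arrive from the
# command line as key=value strings; binary options go through str2bool.
# This assumes comment_magics is one of the valid binary format options.
from jupytext.cli import set_format_options
fmt = {"extension": ".py"}
set_format_options(fmt, ["comment_magics=true"])
assert fmt == {"extension": ".py", "comment_magics": True}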
def set_prefix_and_suffix(fmt, formats, nb_file):
"""Add prefix and suffix information from jupytext.formats if format and path matches"""
for alt_fmt in long_form_multiple_formats(formats):
if alt_fmt["extension"] == fmt["extension"] and fmt.get(
"format_name"
) == alt_fmt.get("format_name"):
try:
base_path(nb_file, alt_fmt)
fmt.update(alt_fmt)
return
except InconsistentPath:
continue
def is_active(ext, metadata, default=True):
"""Is the cell active for the given file extension?"""
if metadata.get("run_control", {}).get("frozen") is True:
return ext == ".ipynb"
for tag in metadata.get("tags", []):
if tag.startswith("active-"):
return ext.replace(".", "") in tag.split("-")
if "active" not in metadata:
return default
return ext.replace(".", "") in re.split(r"\.|,", metadata["active"])
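# Editor's sketch (not part of the sources): an "active-ipynb" tag keeps the
# cell active in the ipynb representation only.
from jupytext.cell_metadata import is_active
assert is_active(".ipynb", {"tags": ["active-ipynb"]})
assert not is_active(".py", {"tags": ["active-ipynb"]})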
class NotAPairedNotebook(ValueError):
"""An error raised when a notebook is not a paired notebook"""
class InconsistentVersions(ValueError):
"""An error raised when two paired files in the git index contain inconsistent representations"""
def file_in_git_index(path):
if not os.path.isfile(path):
return False
return system("git", "status", "--porcelain", path).strip().startswith(("M", "A"))
def git_timestamp(path):
if not os.path.isfile(path):
return None
# Files that are in the git index are considered most recent
if file_in_git_index(path):
return float("inf")
# Return the commit timestamp
try:
git_ts_str = system("git", "log", "-1", "--pretty=%ct", path).strip()
except SystemExit as err:
if err.code == 128:
# git not initialized
git_ts_str = ""
else:
raise
if git_ts_str:
return float(git_ts_str)
# The file is not in the git index
return get_timestamp(path)
def get_timestamp(path):
if not os.path.isfile(path):
return None
return os.lstat(path).st_mtime
def load_paired_notebook(notebook, fmt, config, formats, nb_file, log, pre_commit_mode):
"""Update the notebook with the inputs and outputs of the most recent paired files"""
if not formats:
raise NotAPairedNotebook(f"{shlex.quote(nb_file)} is not a paired notebook")
formats = long_form_multiple_formats(formats)
_, fmt_with_prefix_suffix = find_base_path_and_format(nb_file, formats)
fmt.update(fmt_with_prefix_suffix)
def read_one_file(path, fmt):
if path == nb_file:
return notebook
log(f"[jupytext] Loading {shlex.quote(path)}")
return read(path, fmt=fmt, config=config)
if pre_commit_mode and file_in_git_index(nb_file):
# We raise an error if two representations of this notebook in the git index are inconsistent
nb_files_in_git_index = sorted(
(
(alt_path, alt_fmt)
for alt_path, alt_fmt in paired_paths(nb_file, fmt, formats)
if file_in_git_index(alt_path)
),
key=lambda x: 0 if x[1]["extension"] != ".ipynb" else 1,
)
if len(nb_files_in_git_index) > 1:
path0, fmt0 = nb_files_in_git_index[0]
with open(path0, encoding="utf-8") as fp:
text0 = fp.read()
for alt_path, alt_fmt in nb_files_in_git_index[1:]:
nb = read(alt_path, fmt=alt_fmt, config=config)
alt_text = writes(nb, fmt=fmt0, config=config)
if alt_text != text0:
diff = compare(alt_text, text0, alt_path, path0, return_diff=True)
raise InconsistentVersions(
f"{shlex.quote(alt_path)} and {shlex.quote(path0)} are inconsistent.\n"
+ diff
+ f"\nPlease revert JUST ONE of the files with EITHER\n"
f" git reset {shlex.quote(alt_path)} && git checkout -- {shlex.quote(alt_path)}\nOR\n"
f" git reset {shlex.quote(path0)} && git checkout -- {shlex.quote(path0)}\n"
)
inputs, outputs = latest_inputs_and_outputs(
nb_file, fmt, formats, git_timestamp if pre_commit_mode else get_timestamp
)
notebook = read_pair(inputs, outputs, read_one_file)
return notebook, inputs.path, outputs.path
def exec_command(command, input=None, capture=False, warn_only=False):
"""Execute the desired command, and pipe the given input into it"""
assert isinstance(command, list)
sys.stdout.write("[jupytext] Executing {}\n".format(" ".join(command)))
process = subprocess.Popen(
command,
**(
dict(stdout=subprocess.PIPE, stdin=subprocess.PIPE)
if input is not None
else {}
),
)
out, err = process.communicate(input=input)
if out and not capture:
sys.stdout.write(out.decode("utf-8"))
if err:
sys.stderr.write(err.decode("utf-8"))
if process.returncode:
msg = f"The command '{' '.join(command)}' exited with code {process.returncode}"
hint = (
"" if warn_only else " (use --warn-only to turn this error into a warning)"
)
sys.stderr.write(
f"[jupytext] {'Warning' if warn_only else 'Error'}: {msg}{hint}\n"
)
if not warn_only:
raise SystemExit(process.returncode)
return out
def pipe_notebook(
notebook,
command,
fmt="py:percent",
update=True,
prefix=None,
directory=None,
warn_only=False,
):
"""Pipe the notebook, in the desired representation, to the given command. Update the notebook
with the returned content if desired."""
if command in ["black", "flake8", "autopep8"]:
command = command + " -"
elif command in ["pytest", "unittest"]:
command = command + " {}"
fmt = long_form_one_format(
fmt, notebook.metadata, auto_ext_requires_language_info=False
)
fmt = check_auto_ext(fmt, notebook.metadata, "--pipe-fmt")
text = writes(notebook, fmt)
command = shlex.split(command)
if "{}" in command:
if prefix is not None:
prefix = prefix + (" " if " " in prefix else "_")
tmp_file_args = dict(
mode="w+",
encoding="utf8",
prefix=prefix,
suffix=fmt["extension"],
dir=directory,
delete=False,
)
try:
tmp = NamedTemporaryFile(**tmp_file_args)
except TypeError:
# NamedTemporaryFile does not have an 'encoding' argument on pypy
tmp_file_args.pop("encoding")
tmp = NamedTemporaryFile(**tmp_file_args)
try:
tmp.write(text)
tmp.close()
exec_command(
[cmd if cmd != "{}" else tmp.name for cmd in command],
capture=update,
warn_only=warn_only,
)
if not update:
return notebook
piped_notebook = read(tmp.name, fmt=fmt)
finally:
os.remove(tmp.name)
else:
cmd_output = exec_command(
command, text.encode("utf-8"), capture=update, warn_only=warn_only
)
if not update:
return notebook
if not cmd_output:
sys.stderr.write(
"[jupytext] The command '{}' had no output. As a result, the notebook is empty. "
"Is this expected? If not, use --check rather than --pipe for this command.".format(
command
)
)
piped_notebook = reads(cmd_output.decode("utf-8"), fmt)
if fmt["extension"] != ".ipynb":
piped_notebook = combine_inputs_with_outputs(piped_notebook, notebook, fmt)
# Remove jupytext / text_representation entry
if "jupytext" in notebook.metadata:
piped_notebook.metadata["jupytext"] = notebook.metadata["jupytext"]
else:
piped_notebook.metadata.pop("jupytext", None)
return piped_notebook
def execution_counts_are_in_order(notebook):
"""Returns True if all the code cells have an execution count, ordered from 1 to N with no missing number"""
expected_execution_count = 1
for cell in notebook.cells:
if cell.cell_type == "code":
if cell.execution_count != expected_execution_count:
return False
expected_execution_count += 1
return True
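# Editor's sketch (not part of the sources): a gap in the execution counts
# (here 1 then 3) means the notebook was not run top to bottom in one go.
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext.cli import execution_counts_are_in_order
nb = new_notebook(cells=[new_code_cell("1 + 1", execution_count=1),
                         new_code_cell("2 + 2", execution_count=3)])
assert not execution_counts_are_in_order(nb)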
def code_cells_have_changed(notebook, nb_files):
"""The source for the code cells has not changed"""
for nb_file in nb_files:
if not os.path.exists(nb_file):
return True
nb_ref = read(nb_file)
# Are the new code cells equal to those in the file?
ref = [cell.source for cell in nb_ref.cells if cell.cell_type == "code"]
new = [cell.source for cell in notebook.cells if cell.cell_type == "code"]
if ref != new:
return True
return False
@language nim
sub: RootObj = huh #
n: Natural = range(0..9)
r: Rune = 'é'
a: any = none
b: bool = false
s: string = "Single string"
f1: float64 = 0.0d
f2: float32 = -.0f32
f3: float = 1f+1
i: int = 1_000_000
u'abd = 2u32
var x = 5'u4
c = 'abc'
#[ A multi-line comment.
#[
Blah.
]#
]#
# GitHub colors all other constants blue, including system constants:
42
3.5
AllocStats
DivByZeroError
BiggestUInt
Slice
SomeFloat
# GitHub colors system functions purple:
echo "Hi"
alloc()
# GitHub does not color modules that "system" imports.
import exceptions # Imported by system module.
import Exception # Defined in system module.
def metadata_to_double_percent_options(metadata, plain_json):
"""Metadata to double percent lines"""
text = []
if "title" in metadata:
text.append(metadata.pop("title"))
if "cell_depth" in metadata:
text.insert(0, "%" * metadata.pop("cell_depth"))
if "cell_type" in metadata:
text.append(
"[{}]".format(metadata.pop("region_name", metadata.pop("cell_type")))
)
return metadata_to_text(" ".join(text), metadata, plain_json=plain_json)
def jupytext(args=None):
"""Entry point for the jupytext script"""
args = parse_jupytext_args(args)
def log(text):
if not args.quiet:
sys.stdout.write(text + "\n")
if args.version:
log(__version__)
return 0
if args.pre_commit:
warnings.warn(
"The --pre-commit argument is deprecated. "
"Please consider switching to the pre-commit.com framework "
"(let us know at https://github.com/mwouts/jupytext/issues "
"if that is an issue for you)",
DeprecationWarning,
)
if args.notebooks:
raise ValueError(
"--pre-commit takes notebooks from the git index. Do not pass any notebook here."
)
args.notebooks = notebooks_in_git_index(args.input_format)
log("[jupytext] Notebooks in git index are:")
for nb_file in args.notebooks:
log(nb_file)
# Read notebook from stdin
if not args.notebooks:
if not args.pre_commit:
args.notebooks = ["-"]
if args.set_formats is not None:
# Replace empty string with None
args.update_metadata = recursive_update(
args.update_metadata, {"jupytext": {"formats": args.set_formats or None}}
)
args.sync = True
if args.paired_paths:
if len(args.notebooks) != 1:
raise ValueError("--paired-paths applies to a single notebook")
print_paired_paths(args.notebooks[0], args.input_format)
return 1
if args.run_path:
args.execute = True
if (
(args.test or args.test_strict)
and not args.output_format
and not args.output
and not args.sync
):
raise ValueError("Please provide one of --to, --output or --sync")
if (
not args.output_format
and not args.output
and not args.sync
and not args.pipe
and not args.diff
and not args.check
and not args.update_metadata
and not args.format_options
and not args.set_kernel
and not args.execute
):
raise ValueError(
"Please provide one of --to, --output, --set-formats, --sync, --pipe, --diff, "
"--check, --update-metadata, --format-options, --set-kernel or --execute"
)
if args.diff:
if (
len(args.notebooks) != 2
or args.output_format
or args.output
or args.sync
or args.pipe
or args.check
or args.update_metadata
or args.format_options
or args.set_kernel
or args.execute
):
raise ValueError(
"Please provide two notebooks after 'jupytext --diff'.\n"
"NB: Use --show-changes if you wish to see the changes in "
"a notebook being updated by Jupytext."
)
nb_file1, nb_file2 = args.notebooks
nb1 = read(nb_file1)
nb2 = read(nb_file2)
def fmt_if_not_ipynb(nb):
fmt = nb.metadata["jupytext"]["text_representation"]
if fmt["extension"] == ".ipynb":
return None
return short_form_one_format(fmt)
diff_fmt = (
args.diff_format or fmt_if_not_ipynb(nb1) or fmt_if_not_ipynb(nb2) or "md"
)
diff = compare(
writes(nb2, diff_fmt),
writes(nb1, diff_fmt),
nb_file2,
nb_file1,
return_diff=True,
)
sys.stdout.write(diff)
return
if args.output and len(args.notebooks) != 1:
raise ValueError("Please input a single notebook when using --output")
# Warn if '--to' is used in place of '--output'
if (
not args.output
and args.output_format
and "." in args.output_format
# a suffix is expected to start with one of these characters #901
and not args.output_format.startswith((".", "-", "_"))
and "//" not in args.output_format
):
def single_line(msg, *args, **kwargs):
return f"[warning] {msg}\n"
warnings.formatwarning = single_line
warnings.warn(
"You might have passed a file name to the '--to' option, "
"when a format description was expected. Maybe you want to use the '-o' option instead?"
)
if args.input_format:
args.input_format = long_form_one_format(args.input_format)
if args.output_format:
args.output_format = long_form_one_format(args.output_format)
set_format_options(args.output_format, args.format_options)
# Wildcard extension on Windows #202
notebooks = []
for pattern in args.notebooks:
if "*" in pattern or "?" in pattern:
# Exclude the .jupytext.py configuration file
notebooks.extend(glob.glob(pattern, recursive=True))
else:
notebooks.append(pattern)
# Count how many file have round-trip issues when testing
exit_code = 0
for nb_file in notebooks:
if not args.warn_only:
exit_code += jupytext_single_file(nb_file, args, log)
else:
try:
exit_code += jupytext_single_file(nb_file, args, log)
except Exception as err:
sys.stderr.write(f"[jupytext] Error: {str(err)}\n")
return exit_code
def jupytext_single_file(nb_file, args, log):
"""Apply the jupytext command, with given arguments, to a single file"""
if nb_file == "-" and args.sync:
msg = "Missing notebook path."
if args.set_formats is not None and os.path.isfile(args.set_formats):
msg += f" Maybe you mean 'jupytext --sync {args.set_formats}' ?"
raise ValueError(msg)
nb_dest = None
if args.output:
nb_dest = args.output
elif nb_file == "-":
nb_dest = "-"
else:
try:
bp = base_path(nb_file, args.input_format)
except InconsistentPath:
if args.pre_commit_mode:
log(
"[jupytext] Ignoring unmatched input path {}{}".format(
nb_file,
f" for format {args.input_format}" if args.input_format else "",
)
)
return 0
raise
if args.output_format:
nb_dest = full_path(bp, args.output_format)
config = load_jupytext_config(os.path.abspath(nb_file))
# Just acting on metadata / pipe => save in place
save_in_place = not nb_dest and not args.sync
if save_in_place:
nb_dest = nb_file
if nb_dest == "-":
args.quiet = True
# I. ### Read the notebook ###
fmt = copy(args.input_format) or {}
if not fmt:
ext = os.path.splitext(nb_file)[1]
if ext:
fmt = {"extension": ext}
if fmt:
set_format_options(fmt, args.format_options)
log(
"[jupytext] Reading {}{}".format(
nb_file if nb_file != "-" else "stdin",
f" in format {short_form_one_format(fmt)}" if "extension" in fmt else "",
)
)
notebook = read(nb_file, fmt=fmt, config=config)
if "extension" in fmt and "format_name" not in fmt:
text_representation = notebook.metadata.get("jupytext", {}).get(
"text_representation", {}
)
if text_representation.get("extension") == fmt["extension"]:
fmt["format_name"] = text_representation["format_name"]
# Compute actual extension when using script/auto, and update nb_dest if necessary
dest_fmt = args.output_format
if dest_fmt and dest_fmt["extension"] == ".auto":
dest_fmt = check_auto_ext(dest_fmt, notebook.metadata, "--to")
if not args.output and nb_file != "-":
nb_dest = full_path(base_path(nb_file, args.input_format), dest_fmt)
# Set the kernel
set_kernel = args.set_kernel
if (
(not set_kernel)
and args.execute
and notebook.metadata.get("kernelspec", {}).get("name") is None
):
set_kernel = "-"
if set_kernel:
if set_kernel == "-":
language = (
notebook.metadata.get("jupytext", {}).get("main_language")
or notebook.metadata["kernelspec"]["language"]
)
if not language:
raise ValueError(
"Cannot infer a kernel as notebook language is not defined"
)
kernelspec = kernelspec_from_language(language)
else:
try:
kernelspec = get_kernel_spec(set_kernel)
except KeyError as err:
raise KeyError(
"Please choose a kernel name among {}".format(
find_kernel_specs().keys()
)
) from err
kernelspec = {
"name": args.set_kernel,
"language": kernelspec.language,
"display_name": kernelspec.display_name,
}
log("[jupytext] Setting kernel {}".format(kernelspec.get("name")))
args.update_metadata["kernelspec"] = kernelspec
# Are we updating a text file that has a metadata filter? #212
if args.update_metadata or args.format_options:
if (
notebook.metadata.get("jupytext", {}).get("notebook_metadata_filter")
== "-all"
):
notebook.metadata.get("jupytext", {}).pop("notebook_metadata_filter")
# Update the metadata
if args.update_metadata:
log(
"[jupytext] Updating notebook metadata with '{}'".format(
json.dumps(args.update_metadata)
)
)
if (
"kernelspec" in args.update_metadata
and "main_language" in notebook.metadata.get("jupytext", {})
):
notebook.metadata["jupytext"].pop("main_language")
recursive_update(notebook.metadata, args.update_metadata)
# Read paired notebooks
nb_files = [nb_file, nb_dest]
if args.sync:
formats = notebook_formats(
notebook, config, nb_file, fallback_on_current_fmt=False
)
set_prefix_and_suffix(fmt, formats, nb_file)
try:
notebook, inputs_nb_file, outputs_nb_file = load_paired_notebook(
notebook, fmt, config, formats, nb_file, log, args.pre_commit_mode
)
nb_files = [inputs_nb_file, outputs_nb_file]
except NotAPairedNotebook as err:
sys.stderr.write("[jupytext] Warning: " + str(err) + "\n")
return 0
except InconsistentVersions as err:
sys.stderr.write("[jupytext] Error: " + str(err) + "\n")
return 1
# II. ### Apply commands onto the notebook ###
# Pipe the notebook into the desired commands
if nb_file == "-":
prefix = None
directory = None
else:
prefix = os.path.splitext(os.path.basename(nb_file))[0]
directory = os.path.dirname(nb_file)
for cmd in args.pipe or []:
notebook = pipe_notebook(
notebook,
cmd,
args.pipe_fmt,
prefix=prefix,
directory=directory,
warn_only=args.warn_only,
)
# and/or test the desired commands onto the notebook
for cmd in args.check or []:
pipe_notebook(
notebook,
cmd,
args.pipe_fmt,
update=False,
prefix=prefix,
directory=directory,
warn_only=args.warn_only,
)
if (
args.execute
and args.pre_commit_mode
and execution_counts_are_in_order(notebook)
and not code_cells_have_changed(notebook, nb_files)
):
log(
f"[jupytext] Execution of {shlex.quote(nb_file)} "
f"skipped as code cells have not changed and outputs are present."
)
args.execute = False
# Execute the notebook
if args.execute:
kernel_name = notebook.metadata.get("kernelspec", {}).get("name")
log(f"[jupytext] Executing notebook with kernel {kernel_name}")
if nb_dest is not None and nb_dest != "-":
nb_path = os.path.dirname(nb_dest)
elif nb_file != "-":
nb_path = os.path.dirname(nb_file)
else:
nb_path = None
run_path = args.run_path or nb_path
if args.run_path and not os.path.isdir(run_path):
# is this a relative directory?
for base_dir in [nb_path, os.getcwd()]:
try_path = os.path.join(base_dir, run_path)
if os.path.isdir(try_path):
run_path = try_path
break
if not os.path.isdir(run_path):
raise ValueError(f"--run-path={args.run_path} is not a valid path")
if run_path:
resources = {"metadata": {"path": run_path}}
else:
resources = {}
try:
from nbconvert.preprocessors import ExecutePreprocessor
exec_proc = ExecutePreprocessor(timeout=None, kernel_name=kernel_name)
exec_proc.preprocess(notebook, resources=resources)
except (ImportError, RuntimeError) as err:
if args.pre_commit_mode:
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that you have listed 'nbconvert' and 'ipykernel' "
"under 'additional_dependencies' in the jupytext hook."
) from err
raise RuntimeError(
"An error occurred while executing the notebook. Please "
"make sure that 'nbconvert' and 'ipykernel' are installed."
) from err
# III. ### Possible actions ###
# a. Test round trip conversion
if args.test or args.test_strict:
try:
# Round trip from an ipynb document
if fmt["extension"] == ".ipynb":
test_round_trip_conversion(
notebook,
dest_fmt,
update=args.update,
allow_expected_differences=not args.test_strict,
stop_on_first_error=args.stop_on_first_error,
)
# Round trip from a text file
else:
with open(nb_file, encoding="utf-8") as fp:
org_text = fp.read()
# If the destination is not ipynb, we convert to/back that format
if dest_fmt["extension"] != ".ipynb":
dest_text = writes(notebook, fmt=dest_fmt)
notebook = reads(dest_text, fmt=dest_fmt)
text = writes(notebook, fmt=fmt, config=config)
if args.test_strict:
compare(text, org_text)
else:
# we ignore the YAML header in the comparison #414
comment = _SCRIPT_EXTENSIONS.get(fmt["extension"], {}).get(
"comment", ""
)
# white spaces between the comment char and the YAML delimiters are allowed
if comment:
comment = comment + r"\s*"
yaml_header = re.compile(
r"^{comment}---\s*\n.*\n{comment}---\s*\n".format(
comment=comment
),
re.MULTILINE | re.DOTALL,
)
compare(
re.sub(yaml_header, "", text), re.sub(yaml_header, "", org_text)
)
except (NotebookDifference, AssertionError) as err:
sys.stdout.write(f"{nb_file}: {str(err)}")
return 1
return 0
# b. Output to the desired file or format
untracked_files = 0
def lazy_write(path, fmt=None, action=None, update_timestamp_only=False):
"""Write the notebook only if it has changed"""
if path == "-":
write(notebook, "-", fmt=fmt)
return
nonlocal untracked_files
if update_timestamp_only:
modified = False
else:
_, ext = os.path.splitext(path)
fmt = copy(fmt or {})
fmt = long_form_one_format(fmt, update={"extension": ext})
new_content = writes(notebook, fmt=fmt, config=config)
diff = None
if not new_content.endswith("\n"):
new_content += "\n"
if not os.path.isfile(path):
modified = True
else:
with open(path, encoding="utf-8") as fp:
current_content = fp.read()
modified = new_content != current_content
if modified and args.show_changes:
diff = compare(
new_content,
current_content,
"",
"",
return_diff=True,
)
if modified:
# The text representation of the notebook has changed, we write it on disk
if action is None:
message = f"[jupytext] Updating {shlex.quote(path)}"
else:
message = "[jupytext] Writing {path}{format}{action}".format(
path=shlex.quote(path),
format=" in format " + short_form_one_format(fmt)
if fmt and "format_name" in fmt
else "",
action=action,
)
if diff is not None:
message += " with this change:\n" + diff
log(message)
create_prefix_dir(path, fmt)
with open(path, "w", encoding="utf-8") as fp:
fp.write(new_content)
# Otherwise, we only update the timestamp of the text file to make sure
# it remains more recent than the ipynb file, for compatibility with the
# Jupytext contents manager for Jupyter
if args.use_source_timestamp:
log(
f"[jupytext] Setting the timestamp of {shlex.quote(path)} equal to that of {shlex.quote(nb_file)}"
)
os.utime(path, (os.stat(path).st_atime, os.stat(nb_file).st_mtime))
elif not modified:
if path.endswith(".ipynb"):
# No need to update the timestamp of ipynb files
log(f"[jupytext] Unchanged {shlex.quote(path)}")
elif args.sync:
# if the content is unchanged (and matches ipynb), we don't need
# to update the timestamp as the contents manager will not throw in
# that case (see the try/catch on read_pair(... must_match=True))
log(f"[jupytext] Unchanged {shlex.quote(path)}")
else:
log(f"[jupytext] Updating the timestamp of {shlex.quote(path)}")
os.utime(path, None)
if args.pre_commit:
system("git", "add", path)
if args.pre_commit_mode and is_untracked(path):
log(
f"[jupytext] Error: the git index is outdated.\n"
f"Please add the paired notebook with:\n"
f" git add {shlex.quote(path)}"
)
untracked_files += 1
return
if nb_dest:
if nb_dest == nb_file and not dest_fmt:
dest_fmt = fmt
# Test consistency between dest name and output format
if dest_fmt and nb_dest != "-":
base_path(nb_dest, dest_fmt)
# Describe what jupytext is doing
if save_in_place:
action = ""
elif os.path.isfile(nb_dest) and args.update:
if not nb_dest.endswith(".ipynb"):
raise ValueError("--update is only for ipynb files")
action = " (destination file updated)"
check_file_version(notebook, nb_file, nb_dest)
notebook = combine_inputs_with_outputs(notebook, read(nb_dest), fmt=fmt)
elif os.path.isfile(nb_dest):
suggest_update = (
" [use --update to preserve cell outputs and ids]"
if nb_dest.endswith(".ipynb")
else ""
)
action = f" (destination file replaced{suggest_update})"
else:
action = ""
formats = notebook.metadata.get("jupytext", {}).get("formats")
formats = long_form_multiple_formats(formats)
if formats:
try:
base_path_out, _ = find_base_path_and_format(nb_dest, formats)
except InconsistentPath:
# Drop 'formats' if the destination is not part of the paired notebooks
formats = {}
notebook.metadata.get("jupytext", {}).pop("formats")
lazy_write(nb_dest, fmt=dest_fmt, action=action)
nb_dest_in_pair = formats and any(
os.path.exists(alt_path) and os.path.samefile(nb_dest, alt_path)
for alt_path, _ in paired_paths(nb_file, fmt, formats)
)
if (
nb_dest_in_pair
and os.path.isfile(nb_file)
and not nb_file.endswith(".ipynb")
and os.path.isfile(nb_dest)
and nb_dest.endswith(".ipynb")
):
# If the destination is an ipynb file and is in the pair, then we
# update the original text file timestamp, as required by our Content Manager
# Otherwise Jupyter will refuse to open the paired notebook #335
# NB: An alternative is --use-source-timestamp
lazy_write(nb_file, update_timestamp_only=True)
# c. Synchronize paired notebooks
elif args.sync:
write_pair(nb_file, formats, lazy_write)
return untracked_files
"""Read and write Jupyter notebooks as text files"""
import logging
import os
import sys
import warnings
from copy import copy, deepcopy
import nbformat
from nbformat.v4.nbbase import NotebookNode, new_code_cell, new_notebook
from nbformat.v4.rwbase import NotebookReader, NotebookWriter
from .cell_metadata import _IGNORE_CELL_METADATA
from .formats import (
_VALID_FORMAT_OPTIONS,
divine_format,
format_name_for_ext,
get_format_implementation,
guess_format,
long_form_one_format,
read_format_from_metadata,
rearrange_jupytext_metadata,
update_jupytext_formats_metadata,
)
from .header import (
encoding_and_executable,
header_to_metadata_and_cell,
insert_jupytext_info_and_filter_metadata,
insert_or_test_version_number,
metadata_and_cell_to_header,
)
from .languages import (
default_language_from_metadata_and_ext,
set_main_and_cell_language,
)
from .metadata_filter import filter_metadata, update_metadata_filters
from .myst import MYST_FORMAT_NAME, myst_extensions, myst_to_notebook, notebook_to_myst
from .pandoc import md_to_notebook, notebook_to_md
from .pep8 import pep8_lines_between_cells
from .quarto import notebook_to_qmd, qmd_to_notebook
from .version import __version__
@others
@language python
@tabwidth -4
class NotSupportedNBFormatVersion(NotImplementedError):
"""An error issued when the current notebook format is not supported by this version of Jupytext"""
class TextNotebookConverter(NotebookReader, NotebookWriter):
"""A class that can read or write a Jupyter notebook as text"""
@others
def incorrectly_encoded_metadata(text):
"""Encode a text that Jupytext cannot parse as a cell metadata"""
return {"incorrectly_encoded_metadata": text}
def __init__(self, fmt, config):
self.fmt = copy(long_form_one_format(fmt))
self.config = config
self.ext = self.fmt["extension"]
self.implementation = get_format_implementation(
self.ext, self.fmt.get("format_name")
)
def update_fmt_with_notebook_options(self, metadata, read=False):
"""Update format options with the values in the notebook metadata, and record those
options in the notebook metadata"""
# The settings in the Jupytext configuration file have precedence over the metadata in the notebook
# when the notebook is saved. This is because the metadata in the notebook might not be visible
# in the text representation when e.g. notebook_metadata_filter="-all", which makes them hard to edit.
if not read and self.config is not None:
self.config.set_default_format_options(self.fmt, read)
# Use format options from the notebook if not already set by the config
for opt in _VALID_FORMAT_OPTIONS:
if opt in metadata.get("jupytext", {}):
self.fmt.setdefault(opt, metadata["jupytext"][opt])
# When we read the notebook we use the values of the config as defaults, as again the text representation
# of the notebook might not store the format options when notebook_metadata_filter="-all"
if read and self.config is not None:
self.config.set_default_format_options(self.fmt, read=read)
# We save the format options in the notebook metadata
for opt in _VALID_FORMAT_OPTIONS:
if opt in self.fmt:
metadata.setdefault("jupytext", {})[opt] = self.fmt[opt]
# Is this format the same as that documented in the YAML header? If so, we want to know the format version
file_fmt = metadata.get("jupytext", {}).get("text_representation", {})
if self.fmt.get("extension") == file_fmt.get("extension") and self.fmt.get(
"format_name"
) == file_fmt.get("format_name"):
self.fmt.update(file_fmt)
# rST to md conversion should happen only once
if metadata.get("jupytext", {}).get("rst2md") is True:
metadata["jupytext"]["rst2md"] = False
def reads(self, s, **_):
"""Read a notebook represented as text"""
if self.fmt.get("format_name") == "pandoc":
return md_to_notebook(s)
if self.fmt.get("format_name") == "quarto":
return qmd_to_notebook(s)
if self.fmt.get("format_name") == MYST_FORMAT_NAME:
return myst_to_notebook(s)
lines = s.splitlines()
cells = []
metadata, jupyter_md, header_cell, pos = header_to_metadata_and_cell(
lines,
self.implementation.header_prefix,
self.implementation.header_suffix,
self.implementation.extension,
self.fmt.get(
"root_level_metadata_as_raw_cell",
self.config.root_level_metadata_as_raw_cell
if self.config is not None
else True,
),
)
default_language = default_language_from_metadata_and_ext(
metadata, self.implementation.extension
)
self.update_fmt_with_notebook_options(metadata, read=True)
if header_cell:
cells.append(header_cell)
lines = lines[pos:]
if (
self.implementation.format_name
and self.implementation.format_name.startswith("sphinx")
):
cells.append(new_code_cell(source="%matplotlib inline"))
cell_metadata_json = False
while lines:
reader = self.implementation.cell_reader_class(self.fmt, default_language)
cell, pos = reader.read(lines)
cells.append(cell)
cell_metadata_json = cell_metadata_json or reader.cell_metadata_json
if pos <= 0:
raise Exception(
"Blocked at lines " + "\n".join(lines[:6])
) # pragma: no cover
lines = lines[pos:]
custom_cell_magics = self.fmt.get("custom_cell_magics", "").split(",")
set_main_and_cell_language(
metadata, cells, self.implementation.extension, custom_cell_magics
)
cell_metadata = set()
for cell in cells:
cell_metadata.update(cell.metadata.keys())
update_metadata_filters(metadata, jupyter_md, cell_metadata)
if cell_metadata_json:
metadata.setdefault("jupytext", {}).setdefault("cell_metadata_json", True)
if (
self.implementation.format_name
and self.implementation.format_name.startswith("sphinx")
):
filtered_cells = []
for i, cell in enumerate(cells):
if (
cell.source == ""
and i > 0
and i + 1 < len(cells)
and cells[i - 1].cell_type != "markdown"
and cells[i + 1].cell_type != "markdown"
):
continue
filtered_cells.append(cell)
cells = filtered_cells
return new_notebook(cells=cells, metadata=metadata)
def filter_notebook(self, nb, metadata, preserve_cell_ids=False):
self.update_fmt_with_notebook_options(nb.metadata)
unsupported_keys = set()
metadata = insert_jupytext_info_and_filter_metadata(
metadata, self.fmt, self.implementation, unsupported_keys=unsupported_keys
)
cells = []
for cell in nb.cells:
cell_metadata = filter_metadata(
cell.metadata,
self.fmt.get("cell_metadata_filter"),
_IGNORE_CELL_METADATA,
unsupported_keys=unsupported_keys,
)
if preserve_cell_ids and hasattr(cell, "id"):
id = {"id": cell.id}
else:
id = {}
if cell.cell_type == "code":
cells.append(
new_code_cell(source=cell.source, metadata=cell_metadata, **id)
)
else:
cells.append(
NotebookNode(
source=cell.source,
metadata=cell_metadata,
cell_type=cell.cell_type,
**id,
)
)
_warn_on_unsupported_keys(unsupported_keys)
return NotebookNode(
nbformat=nb.nbformat,
nbformat_minor=nb.nbformat_minor,
metadata=metadata,
cells=cells,
)
def writes(self, nb, metadata=None, **kwargs):
"""Return the text representation of the notebook"""
if self.fmt.get("format_name") == "pandoc":
return notebook_to_md(
self.filter_notebook(nb, metadata, preserve_cell_ids=True)
)
if self.fmt.get("format_name") == "quarto" or self.ext == ".qmd":
return notebook_to_qmd(self.filter_notebook(nb, metadata))
if self.fmt.get(
"format_name"
) == MYST_FORMAT_NAME or self.ext in myst_extensions(no_md=True):
pygments_lexer = metadata.get("language_info", {}).get(
"pygments_lexer", None
)
return notebook_to_myst(
self.filter_notebook(nb, metadata),
default_lexer=pygments_lexer,
)
# Copy the notebook, in order to be sure we do not modify the original notebook
nb = NotebookNode(
nbformat=nb.nbformat,
nbformat_minor=nb.nbformat_minor,
metadata=deepcopy(metadata or nb.metadata),
cells=nb.cells,
)
metadata = nb.metadata
default_language = (
default_language_from_metadata_and_ext(
metadata, self.implementation.extension, True
)
or "python"
)
self.update_fmt_with_notebook_options(nb.metadata)
if "use_runtools" not in self.fmt:
for cell in nb.cells:
if cell.metadata.get("hide_input", False) or cell.metadata.get(
"hide_output", False
):
self.fmt["use_runtools"] = True
break
header = encoding_and_executable(nb, metadata, self.ext)
unsupported_keys = set()
header_content, header_lines_to_next_cell = metadata_and_cell_to_header(
nb,
metadata,
self.implementation,
self.fmt,
unsupported_keys=unsupported_keys,
)
header.extend(header_content)
cell_exporters = []
looking_for_first_markdown_cell = (
self.implementation.format_name
and self.implementation.format_name.startswith("sphinx")
)
split_at_heading = self.fmt.get("split_at_heading", False)
for cell in nb.cells:
if looking_for_first_markdown_cell and cell.cell_type == "markdown":
cell.metadata.setdefault("cell_marker", '"""')
looking_for_first_markdown_cell = False
cell_exporters.append(
self.implementation.cell_exporter_class(
cell, default_language, self.fmt, unsupported_keys=unsupported_keys
)
)
_warn_on_unsupported_keys(unsupported_keys)
texts = [cell.cell_to_text() for cell in cell_exporters]
lines = []
# concatenate cells in reverse order to determine how many blank lines (pep8)
for i, cell in reversed(list(enumerate(cell_exporters))):
text = cell.remove_eoc_marker(texts[i], lines)
if (
i == 0
and self.implementation.format_name
and self.implementation.format_name.startswith("sphinx")
and (text in [["%matplotlib inline"], ["# %matplotlib inline"]])
):
continue
lines_to_next_cell = cell.lines_to_next_cell
if lines_to_next_cell is None:
lines_to_next_cell = pep8_lines_between_cells(
text, lines, self.implementation.extension
)
text.extend([""] * lines_to_next_cell)
# two blank lines between markdown cells in Rmd when those do not have explicit region markers
if self.ext in [".md", ".markdown", ".Rmd"] and not cell.is_code():
if (
i + 1 < len(cell_exporters)
and not cell_exporters[i + 1].is_code()
and not texts[i][0].startswith("<!-- #")
and not texts[i + 1][0].startswith("<!-- #")
and (
not split_at_heading
or not (texts[i + 1] and texts[i + 1][0].startswith("#"))
)
):
text.append("")
# "" between two consecutive code cells in sphinx
if self.implementation.format_name.startswith("sphinx") and cell.is_code():
if i + 1 < len(cell_exporters) and cell_exporters[i + 1].is_code():
text.append('""')
lines = text + lines
if header_lines_to_next_cell is None:
header_lines_to_next_cell = pep8_lines_between_cells(
header_content, lines, self.implementation.extension
)
header.extend([""] * header_lines_to_next_cell)
return "\n".join(header + lines)
def reads(text, fmt=None, as_version=nbformat.NO_CONVERT, config=None, **kwargs):
"""
Read a notebook from a string
:param text: the text representation of the notebook
:param fmt: (optional) the jupytext format like `md`, `py:percent`, ...
:param as_version: see nbformat.reads
:param config: (optional) a Jupytext configuration object
:param kwargs: (not used) additional parameters for nbformat.reads
:return: the notebook
"""
fmt = copy(fmt) if fmt else divine_format(text)
fmt = long_form_one_format(fmt)
ext = fmt["extension"]
if ext == ".ipynb":
nb = nbformat.reads(text, as_version, **kwargs)
(version, version_minor) = nbformat.reader.get_version(nb)
if version != 4:
warnings.warn(
f"Notebooks in nbformat version {version}.{version_minor} are not supported by Jupytext. "
f"Please consider converting them to nbformat version 4.x with "
f"'jupyter nbconvert --to notebook --inplace'"
)
return nb
format_name = read_format_from_metadata(text, ext) or fmt.get("format_name")
if format_name:
format_options = {}
else:
format_name, format_options = guess_format(text, ext)
if format_name:
fmt["format_name"] = format_name
fmt.update(format_options)
reader = TextNotebookConverter(fmt, config)
notebook = reader.reads(text, **kwargs)
rearrange_jupytext_metadata(notebook.metadata)
if format_name and insert_or_test_version_number():
notebook.metadata.setdefault("jupytext", {}).setdefault(
"text_representation", {}
).update({"extension": ext, "format_name": format_name})
return notebook
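# Editor's sketch (not part of the sources): round-trip a small percent
# script through the public reads/writes API.
import jupytext
text = "# %%\nprint('hello')\n"
nb = jupytext.reads(text, fmt="py:percent")
assert nb.cells[0].cell_type == "code"
assert "print('hello')" in jupytext.writes(nb, fmt="py:percent")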
def read(fp, as_version=nbformat.NO_CONVERT, fmt=None, config=None, **kwargs):
"""Read a notebook from a file name or a file object
:param fp: a file name or a file object
:param as_version: see nbformat.read
:param fmt: (optional) the jupytext format like `md`, `py:percent`, ...
:param config: (optional) a Jupytext configuration object
:param kwargs: (not used) additional parameters for nbformat.read
:return: the notebook
"""
if as_version != nbformat.NO_CONVERT and not isinstance(as_version, int):
raise TypeError(
"Second argument 'as_version' should be either nbformat.NO_CONVERT, or an integer."
)
if fp == "-":
text = sys.stdin.read()
# Update the input format by reference if missing
if isinstance(fmt, dict) and not fmt:
fmt.update(long_form_one_format(divine_format(text)))
return reads(text, fmt)
if not hasattr(fp, "read"):
# Treat fp as a file name
fp = str(fp)
_, ext = os.path.splitext(fp)
fmt = copy(fmt or {})
if not isinstance(fmt, dict):
fmt = long_form_one_format(fmt)
fmt.update({"extension": ext})
with open(fp, encoding="utf-8") as stream:
return read(stream, as_version=as_version, fmt=fmt, config=config, **kwargs)
if fmt is not None:
fmt = long_form_one_format(fmt)
if fmt["extension"] == ".ipynb":
notebook = nbformat.read(fp, as_version, **kwargs)
rearrange_jupytext_metadata(notebook.metadata)
return notebook
return reads(fp.read(), fmt, config=config, **kwargs)
def writes(notebook, fmt, version=nbformat.NO_CONVERT, config=None, **kwargs):
"""Return the text representation of the notebook
:param notebook: the notebook
:param fmt: the jupytext format like `md`, `py:percent`, ...
:param version: see nbformat.writes
:param config: (optional) a Jupytext configuration object
:param kwargs: (not used) additional parameters for nbformat.writes
:return: the text representation of the notebook
"""
if version is not nbformat.NO_CONVERT:
if not isinstance(version, int):
raise TypeError(
"The argument 'version' should be either nbformat.NO_CONVERT, or an integer."
)
notebook = nbformat.convert(notebook, version)
(version, version_minor) = nbformat.reader.get_version(notebook)
if version < 4:
raise NotSupportedNBFormatVersion(
f"Notebooks in nbformat version {version}.{version_minor} are not supported by Jupytext. "
f"Please convert your notebooks to nbformat version 4 with "
f"'jupyter nbconvert --to notebook --inplace', or call this function with 'version=4'."
)
if version > 4 or (version == 4 and version_minor > 5):
warnings.warn(
f"Notebooks in nbformat version {version}.{version_minor} "
f"have not been tested with Jupytext version {__version__}."
)
metadata = deepcopy(notebook.metadata)
rearrange_jupytext_metadata(metadata)
fmt = copy(fmt)
fmt = long_form_one_format(fmt, metadata)
ext = fmt["extension"]
format_name = fmt.get("format_name")
if ext == ".ipynb":
return nbformat.writes(
drop_text_representation_metadata(notebook, metadata),
version,
**kwargs,
)
if not format_name:
format_name = format_name_for_ext(metadata, ext, explicit_default=False)
if format_name:
fmt["format_name"] = format_name
update_jupytext_formats_metadata(metadata, fmt)
writer = TextNotebookConverter(fmt, config)
return writer.writes(notebook, metadata)
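# Study sketch (added note): writes() returns the text representation of a
# notebook built with nbformat. Guarded; the assert states expected behavior only.
if 0:
    from nbformat.v4.nbbase import new_code_cell, new_notebook
    nb = new_notebook(cells=[new_code_cell("print('hi')")])
    assert '# %%' in writes(nb, fmt='py:percent')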
def drop_text_representation_metadata(notebook, metadata=None):
"""When the notebook is saved to an ipynb file, we drop the text_representation metadata"""
if metadata is None:
# Make a copy to avoid modification by reference
metadata = deepcopy(notebook["metadata"])
jupytext_metadata = metadata.get("jupytext", {})
jupytext_metadata.pop("text_representation", {})
# Remove the jupytext section if empty
if not jupytext_metadata:
metadata.pop("jupytext", {})
return NotebookNode(
nbformat=notebook["nbformat"],
nbformat_minor=notebook["nbformat_minor"],
metadata=metadata,
cells=notebook["cells"],
)
def write(nb, fp, version=nbformat.NO_CONVERT, fmt=None, config=None, **kwargs):
"""Write a notebook to a file name or a file object
:param nb: the notebook
:param fp: a file name or a file object
:param version: see nbformat.write
:param fmt: (optional if fp is a file name) the jupytext format like `md`, `py:percent`, ...
:param config: (optional) a Jupytext configuration object
:param kwargs: (not used) additional parameters for nbformat.write
"""
if fp == "-":
# Use sys.stdout.buffer when possible, and explicit utf-8 encoding, cf. #331
content = writes(nb, version=version, fmt=fmt, config=config, **kwargs)
try:
# Python 3
sys.stdout.buffer.write(content.encode("utf-8"))
except AttributeError:
sys.stdout.write(content)
return
if not hasattr(fp, "write"):
# Treat fp as a file name
fp = str(fp)
_, ext = os.path.splitext(fp)
fmt = copy(fmt or {})
fmt = long_form_one_format(fmt, update={"extension": ext})
create_prefix_dir(fp, fmt)
with open(fp, "w", encoding="utf-8") as stream:
write(nb, stream, version=version, fmt=fmt, config=config, **kwargs)
return
else:
assert (
fmt is not None
), "'fmt' argument in jupytext.write is mandatory unless fp is a file name"
content = writes(nb, version=version, fmt=fmt, config=config, **kwargs)
if isinstance(content, bytes):
content = content.decode("utf8")
fp.write(content)
if not content.endswith("\n"):
fp.write("\n")
def is_identifier(text):
return bool(_IS_IDENTIFIER.match(text))
def create_prefix_dir(nb_file, fmt):
"""Create directory if fmt has a prefix"""
if "prefix" in fmt:
nb_dir = os.path.dirname(nb_file) + os.path.sep
if not os.path.isdir(nb_dir):
logging.log(
logging.WARNING, "[jupytext] creating missing directory %s", nb_dir
)
os.makedirs(nb_dir)
def _warn_on_unsupported_keys(unsupported_keys):
if unsupported_keys:
warnings.warn(
f"The following metadata cannot be exported "
f"to the text notebook: {sorted(unsupported_keys)}"
)
import pdb
if 1:
# print contents of jupytext's config file.
g.app.jupytextManager.get_jupytext_config_file()
if 0:
<< Use cli: --to fmt ekr-small-test.ipynb >>
if 0:
# Use API.
g.cls()
# Imports from jupytext.py
# import nbformat
# from nbformat.v4.nbbase import NotebookNode, new_code_cell, new_notebook
# EKR: really nbformat.notebooknode.NotebookNode
# from nbformat.v4.rwbase import NotebookReader, NotebookWriter
import jupytext
py_file = r'c:\test\ekr-small-test.py'
ipynb_file = r'c:\test\ekr-small-test.ipynb'
# read a notebook (class nbformat.notebooknode.NotebookNode) from a file
nb = jupytext.read(ipynb_file, fmt='py:percent')
# keys are 'cells', 'metadata', 'nbformat', 'nbformat_minor'.
if 0:
for z in nb:
print(f"{z:>20} {nb[z]}")
if 1:
print('metadata...')
d = nb['metadata']
for z in d:
print(f"{z}: {g.objToString(d[z])}")
if 1:
print('')
print('cells...')
for i, cell in enumerate(nb['cells']):
print(f"cell {i}: {g.objToString(cell)}")
# write a notebook to a file in the 'py:percent' format
### jupytext.write(nb, "notebook.py", fmt="py:percent")
from jupytext.cli import jupytext as cli
kind = 'py' # 'py' or 'auto' ('auto' prevents 'percent').
fmt = 'percent' # 'percent' or 'light'
args = ['--to', f"{kind}:{fmt}", r'c:\test\ekr-small-test.ipynb']
if 0:
# b 6 @509 jupytext_single_file.
# b 7 @411
pdb.runcall(cli, args)
else:
cli(args)
@language python
# %%
# Leo created this prefix from @data jupyter-prefix.
# Jupyter's `New Python Text Notebook with Percent Format` command created this file.
# ---
# jupyter:
# jupytext:
# formats: py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.16.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%
@language python
# %%
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.16.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown] editable=true slideshow={"slide_type": ""}
# # Creating a new notebook
# %% [markdown]
# 1. Open the command palette with the shortcut: `Ctrl/Command` + `Shift` + `P`
# 2. Search for the command `Create New Blank Notebook`
# %% editable=true slideshow={"slide_type": ""}
# How to get back to the start page
# %% [markdown] editable=true slideshow={"slide_type": ""}
# 1. Open the command palette with the shortcut: `Ctrl/Command` + `Shift` + `P`
# 2. Search for the command `Python: Open Start Page`
# %% [markdown] editable=true slideshow={"slide_type": ""}
# # Getting started
# %% [markdown]
# You are currently viewing what we call our Notebook Editor. It is an interactive document based on Jupyter Notebooks that supports the intermixing of code, outputs and markdown documentation.
#
# This cell is a markdown cell. To edit the text in this cell, simply double click on the cell to change it into edit mode.
#
# The next cell below is a code cell. You can switch a cell between code and markdown by clicking on the code/markdown icons or by using the keyboard shortcuts `Y` and `M` respectively.
# %%
print("hello world")
# %% [markdown]
# * To execute the code in the cell above, click on the cell to select it and then either press the play button in the cell toolbar, or use the keyboard shortcut `Ctrl/Command` + `Enter`.
# * To edit the code, just click in the cell and start editing.
# * To add a new cell below, click the `Add Cell` icon at the bottom left of the cell, or enter command mode with the `ESC` key and then use the keyboard shortcut `B` to create the new cell below.
#
# %% [markdown]
# # Features
# %% [markdown]
# **Variable explorer**
#
# To view all your active variables and their current values in the notebook, click on the variable explorer icon in the top toolbar.
#
# **Data Viewer**
#
# To view your data frame in a more visual "Excel"-like format, open the variable explorer; to the left of any dataframe object you will see the data viewer icon, which you can click to open the data viewer.
#
# **Convert to Python File**
#
# To export your notebook to a Python file (.py), click on the `Convert to Python script` icon in the top toolbar.
#
# **Plot Viewer**
#
# If you have a graph (such as matplotlib) in your output, you'll notice that if you hover over the graph, the `Plot Viewer` icon will appear in the top left. Click the icon to open the graph in the Plot Viewer, which allows you to zoom in on your plots and export them in formats such as PNG and JPEG.
#
# **Switching Kernels**
#
# The notebook editor will detect all kernels in your system by default. To change your notebook kernel, click on the kernel status in the top toolbar at the far right. For example, your kernel status may say "Python 3: Idle". This will open up the kernel selector where you can choose your desired kernel.
#
# **Remote Jupyter Server**
#
# To connect to a remote Jupyter server, open the command palette and search for the command `Specify remote or local Jupyter server for connections`. Then select `Existing` and enter the remote Jupyter server URL. Afterwards, you'll be prompted to reload the window, and the Notebook will be opened connected to the remote Jupyter server.
#
# %% [markdown] colab_type="text" id="-Rh3-Vt9Nev9"
# # More Resources
# %% [markdown]
# - [Data science tutorial for Visual Studio Code](https://code.visualstudio.com/docs/python/data-science-tutorial)
# - [Jupyter Notebooks in Visual Studio Code documentation](https://code.visualstudio.com/docs/python/jupyter-support)
@language python
# %%
# A leading (misleading?) comment.
<< preamble >>
@others
@language json
# %%
# ---
# jupyter:
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
def is_valid_metadata_key(text):
"""Can this text be a proper key?"""
return bool(_IS_VALID_METADATA_KEY.match(text))
@language python
# %%
2 + 666 + 4
# %%
print('hi changed externally')
# %% [markdown]
# This is a markdown cell
# %% [markdown]
# Another markdown cell
# %%
<< prefix >>
@others
@language python
@tabwidth -4
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.16.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# Loading and Analyzing Data
# ==========================
# %%
import pandas as pd # programmers like shortening names for things, so they usually import pandas as "pd"
import altair as alt # again with the shortening of names
import requests # we use it for downloading the data so we don't have to save it on GitHub (too big!)
import zipfile # for de-compressing the files we download from the EPA
# %% [markdown]
# ## Load the air quality data
# The EPA publishes PM2.5 daily summary files annually [on their website](https://aqs.epa.gov/aqsweb/airdata/download_files.html#Daily). This data is the "PM2.5 FRM/FEM Mass (88101)" dataset.
# Pandas understands what a CSV file is, so here we can just load them into two `DataFrame`s. A data frame is simply one representation of a table of data. It is the most important form of storage for working with data in pandas.
# %%
# Download the data from the EPA website
data_file_urls = [
'https://aqs.epa.gov/aqsweb/airdata/daily_88101_2020.zip',
'https://aqs.epa.gov/aqsweb/airdata/daily_88101_2019.zip'
]
# copied this example from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
for url in data_file_urls:
local_filename = "data/{}".format(url.split('/')[-1])
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
# and unzip the files
files_to_unzip = ["data/{}".format(url.split('/')[-1]) for url in data_file_urls]
for f in files_to_unzip:
with zipfile.ZipFile(f,"r") as zip_ref:
zip_ref.extractall("data")
def is_jupyter_language(language):
"""Is this a jupyter language?"""
for lang in _JUPYTER_LANGUAGES:
if language.lower() == lang.lower():
return True
return False
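# Study sketch (added note): the comparison is case-insensitive, assuming
# 'python' appears in _JUPYTER_LANGUAGES and 'klingon' does not.
if 0:
    assert is_jupyter_language('Python')
    assert not is_jupyter_language('klingon')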
# %%
air_2019_df = pd.read_csv("data/daily_88101_2019.csv")
air_2020_df = pd.read_csv("data/daily_88101_2020.csv")
air_2020_df.head() # this helpfully prints out the first few rows with headers to preview the data
# %%
"{} rows for 2019, {} rows for 2020".format(air_2019_df.shape[0], air_2019_df.shape[0])
# %% [markdown]
# ## Aggregate and average MA data by city
# Let's compare MA data by city in 2020 and 2019.
# %%
# Step 1 - filter the data down by state name
is_MA_data = air_2020_df['State Name'] == "Massachusetts"
air_MA_2020_df = air_2020_df[is_MA_data]
is_MA_data = air_2019_df['State Name'] == "Massachusetts"
air_MA_2019_df = air_2019_df[is_MA_data]
"{} reports for MA in 2019, {} reports for MA in 2020".format(air_MA_2019_df.shape[0], air_MA_2020_df.shape[0])
# %%
# now trim down to just the columns we care about so it is easier to understand
interesting_columns = ['City Name', 'Latitude', 'Longitude', 'Arithmetic Mean']
air_MA_2020_df = pd.DataFrame(air_MA_2020_df, columns=interesting_columns)
air_MA_2019_df = pd.DataFrame(air_MA_2019_df, columns=interesting_columns)
air_MA_2019_df
# %%
# now group all the records by city and average them
avg_by_city_2020_MA_df = air_MA_2020_df.groupby('City Name').mean()\
.reset_index()\
.rename(columns={'City Name': 'City', 'Arithmetic Mean': 'Mean'})
avg_by_city_2019_MA_df = air_MA_2019_df.groupby('City Name').mean()\
.reset_index()\
.rename(columns={'City Name': 'City', 'Arithmetic Mean': 'Mean'})
# now we need to add in a year column so we can tell the data apart!
avg_by_city_2020_MA_df['year'] = 2020
avg_by_city_2019_MA_df['year'] = 2019
# now we can just concatenate the two dataframes to get all our data in one place
ma_city_avg_df = avg_by_city_2019_MA_df.append(avg_by_city_2020_MA_df)
ma_city_avg_df.to_csv('data/MA-city-year-avg.csv')
ma_city_avg_df
# %% [markdown]
# ## Visually Inspect the Data
# %%
alt.Chart(ma_city_avg_df, height=300).mark_bar().encode(
alt.X('year:N'),
alt.Y('Mean'),
color='year:N',
column=alt.Column(field='City', type='ordinal', spacing=10)
).properties(
title="MA City Average PM2.5 (by year)",
)
@language jupytext
<< prefix >>
@others
@tabwidth -4
@language jupytext
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.16.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
def parse_key_equal_value(text):
"""Parse a string of the form 'key1=value1 key2=value2'"""
# Empty metadata?
text = text.strip()
if not text:
return {}
last_space_pos = text.rfind(" ")
# Just an identifier?
if not text.startswith("--") and is_identifier(text[last_space_pos + 1 :]):
key = text[last_space_pos + 1 :]
value = None
result = {key: value}
if last_space_pos > 0:
result.update(parse_key_equal_value(text[:last_space_pos]))
return result
# Iterate on the '=' signs, starting from the right
equal_sign_pos = None
while True:
equal_sign_pos = text.rfind("=", None, equal_sign_pos)
if equal_sign_pos < 0:
return incorrectly_encoded_metadata(text)
# Do we have an identifier on the left of the equal sign?
prev_whitespace = text[:equal_sign_pos].rstrip().rfind(" ")
key = text[prev_whitespace + 1 : equal_sign_pos].strip()
if not is_valid_metadata_key(key):
continue
try:
value = relax_json_loads(text[equal_sign_pos + 1 :])
except (ValueError, SyntaxError):
# try with a longer expression
continue
# Combine with remaining metadata
metadata = (
parse_key_equal_value(text[:prev_whitespace]) if prev_whitespace > 0 else {}
)
# Append our value
metadata[key] = value
# And return
return metadata
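# Study sketch (added note): parsing starts at the right-most '=' and recurses
# on the remainder; a trailing bare identifier becomes a key with value None.
if 0:
    assert parse_key_equal_value('key=1 flag') == {'flag': None, 'key': 1}
    assert parse_key_equal_value('tags=["a", "b"]') == {'tags': ['a', 'b']}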
@language jupytext
# %% [markdown]
# Loading and Analyzing Data
# ==========================
# %%
import pandas as pd # programmers like shortening names for things, so they usually import pandas as "pd"
import altair as alt # again with the shortening of names
import requests # we use it for downloading the data so we don't have to save it on GitHub (too big!)
import zipfile # for de-compressing the files we download from the EPA
# %% [markdown]
# ## Load the air quality data
# The EPA publishes PM2.5 daily summary files annually [on their website](https://aqs.epa.gov/aqsweb/airdata/download_files.html#Daily). This data is the "PM2.5 FRM/FEM Mass (88101)" dataset.
# Pandas understands what a CSV file is, so here we can just load them into two `DataFrame`s. A data frame is simply one representation of a table of data. It is the most important form of storage for working with data in pandas.
# %%
# Download the data from the EPA website
data_file_urls = [
'https://aqs.epa.gov/aqsweb/airdata/daily_88101_2020.zip',
'https://aqs.epa.gov/aqsweb/airdata/daily_88101_2019.zip'
]
# copied this example from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
for url in data_file_urls:
local_filename = "data/{}".format(url.split('/')[-1])
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
# and unzip the files
files_to_unzip = ["data/{}".format(url.split('/')[-1]) for url in data_file_urls]
for f in files_to_unzip:
with zipfile.ZipFile(f,"r") as zip_ref:
zip_ref.extractall("data")
# %%
air_2019_df = pd.read_csv("data/daily_88101_2019.csv")
air_2020_df = pd.read_csv("data/daily_88101_2020.csv")
air_2020_df.head() # this helpfully prints out the first few rows with headers to preview the data
# %%
"{} rows for 2019, {} rows for 2020".format(air_2019_df.shape[0], air_2019_df.shape[0])
# %% [markdown]
# ## Aggregate and average MA data by city
# Let's compare MA data by city in 2020 and 2019.
# %%
# Step 1 - filter the data down by state name
is_MA_data = air_2020_df['State Name'] == "Massachusetts"
air_MA_2020_df = air_2020_df[is_MA_data]
is_MA_data = air_2019_df['State Name'] == "Massachusetts"
air_MA_2019_df = air_2019_df[is_MA_data]
"{} reports for MA in 2019, {} reports for MA in 2020".format(air_MA_2019_df.shape[0], air_MA_2020_df.shape[0])
# %%
# now trim down to just the columns we care about so it is easier to understand
interesting_columns = ['City Name', 'Latitude', 'Longitude', 'Arithmetic Mean']
air_MA_2020_df = pd.DataFrame(air_MA_2020_df, columns=interesting_columns)
air_MA_2019_df = pd.DataFrame(air_MA_2019_df, columns=interesting_columns)
air_MA_2019_df
# %%
# now group all the records by city and average them
avg_by_city_2020_MA_df = air_MA_2020_df.groupby('City Name').mean()\
.reset_index()\
.rename(columns={'City Name': 'City', 'Arithmetic Mean': 'Mean'})
avg_by_city_2019_MA_df = air_MA_2019_df.groupby('City Name').mean()\
.reset_index()\
.rename(columns={'City Name': 'City', 'Arithmetic Mean': 'Mean'})
# now we need to add in a year column so we can tell the data apart!
avg_by_city_2020_MA_df['year'] = 2020
avg_by_city_2019_MA_df['year'] = 2019
# now we can just concatenate the two dataframes to get all our data in one place
ma_city_avg_df = avg_by_city_2019_MA_df.append(avg_by_city_2020_MA_df)
ma_city_avg_df.to_csv('data/MA-city-year-avg.csv')
ma_city_avg_df
def relax_json_loads(text, catch=False):
"""Parse a JSON string or similar"""
text = text.strip()
try:
return loads(text)
except JSONDecodeError:
pass
if not catch:
return ast.literal_eval(text)
try:
return ast.literal_eval(text)
except (ValueError, SyntaxError):
pass
return incorrectly_encoded_metadata(text)
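# Study sketch (added note): JSON first, then a Python literal as a fallback.
if 0:
    assert relax_json_loads('{"a": 1}') == {'a': 1}
    assert relax_json_loads("{'a': 1}") == {'a': 1}  # via ast.literal_eval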
# %% [markdown]
# ## Visually Inspect the Data
# %%
alt.Chart(ma_city_avg_df, height=300).mark_bar().encode(
alt.X('year:N'),
alt.Y('Mean'),
color='year:N',
column=alt.Column(field='City', type='ordinal', spacing=10)
).properties(
title="MA City Average PM2.5 (by year)",
)
# jm = g.app.jupytextManager
import io
import jupytext
path = r'c:/test/example-data-analysis.ipynb'
last = c.lastTopLevel()
p = last.insertAfter()
p.h = f"Test: {path}"
# The guts of jm.read.
fmt = 'py:percent'
notebook = jupytext.read(path, fmt=fmt)
with io.StringIO() as f:
jupytext.write(notebook, f, fmt=fmt)
contents = f.getvalue()
p.b = contents
c.redraw(p)
@language matlab
clear all, close all, clc
A=imread('../DATA/dog.jpg');
X=double(rgb2gray(A)); % Convert RGB->gray, 256 bit->double.
nx = size(X,1); ny = size(X,2);
imagesc(X), axis off, colormap gray
[U,S,V] = svd(X);
for r=[5 20 100]; % Truncation value
Xapprox = U(:,1:r)*S(1:r,1:r)*V(:,1:r)'; % Approx. image
figure, imagesc(Xapprox), axis off
title(['r=',num2str(r,'%d')]);
end
%% f_ch01_ex02_2
subplot(1,2,1), semilogy(diag(S),'k')
subplot(1,2,2), plot(cumsum(diag(S))/sum(diag(S)),'k')
<< prefix >>
@others
@language jupytext
@tabwidth -4
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.16.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
from matplotlib.image import imread
import matplotlib.pyplot as plt
import numpy as np
import os
# %% [markdown]
#
# Section 1
# ---------
#
# Section 1, line 1.
# %%
plt.rcParams['figure.figsize'] = [16, 8]
A = imread(os.path.join('..','DATA','dog.jpg'))
X = np.mean(A, -1); # Convert RGB to grayscale
img = plt.imshow(X)
img.set_cmap('gray')
plt.axis('off')
plt.show()
# %%
U, S, VT = np.linalg.svd(X,full_matrices=False)
S = np.diag(S)
j = 0
for r in (5, 20, 100):
# Construct approximate image
Xapprox = U[:,:r] @ S[0:r,:r] @ VT[:r,:]
plt.figure(j+1)
j += 1
img = plt.imshow(Xapprox)
img.set_cmap('gray')
plt.axis('off')
plt.title('r = ' + str(r))
plt.show()
# %%
plt.figure(1)
plt.semilogy(np.diag(S))
plt.title('Singular Values')
plt.show()
plt.figure(2)
plt.plot(np.cumsum(np.diag(S))/np.sum(np.diag(S)))
plt.title('Singular Values: Cumulative Sum')
plt.show()
def is_json_metadata(text):
"""Is this a JSON metadata?"""
first_curly_bracket = text.find("{")
if first_curly_bracket < 0:
return False
first_equal_sign = text.find("=")
if first_equal_sign < 0:
return True
return first_curly_bracket < first_equal_sign
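# Study sketch (added note): metadata counts as JSON when a '{' appears
# before any '='.
if 0:
    assert is_json_metadata('python {"tags": []}')
    assert not is_json_metadata('python key=value')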
@language jupytext
# %% [markdown]
# Loading and Analyzing Data
# ==========================
A **bold** word.
# %%
import pandas as pd # A comment
def spam(s: str) -> str:
print(s)
spam('Hi')
# %% [markdown]
*Italics* and **bold**.
@language rest
@wrap
@nosearch
https://github.com/leo-editor/leo-editor/issues/4146
PR: https://github.com/leo-editor/leo-editor/pull/4147
- Item 1
- Item 2.
***A label.
.. comment.
@language python
def spam(s: str) -> str:
g.es_print(s)
spam('Hi')
@language glsl
if 1 {
}
@language python
def spam(s: str) -> str:
"""A docstring a"""
g.es_print(s)
spam('1234')
@language javascript
<script setup>
import { ref } from 'vue'
const greeting = ref('Hello World!')
</script>
@language html
<head>
<script setup>
import { ref } from 'vue'
const greeting = ref('Hello World!')
</script>
</head>
@language html
<!DOCTYPE html>
<html>
<body>
<h2>Width and Height Attributes</h2>
<p>
The width and height attributes of the img tag define the width and
height of the image:
</p>
<img src="img_girl.jpg" width="500" height="600">
</body>
</html>
@language javascript
<script setup>
import { ref } from 'vue'
const greeting = ref('Hello World!')
</script>
@language vue
<template>
<p class="greeting">{{ greeting }}</p>
</template>
@language css
<style>
body {background-color: powderblue;}
h1 {color: blue;}
p {color: red;}
.greeting {
color: red;
font-weight: bold;
}
</style>
def text_to_metadata(text, allow_title=False):
"""Parse the language/cell title and associated metadata"""
# Parse the language or cell title = everything before the last blank space before { or =
text = text.strip()
first_curly_bracket = text.find("{")
first_equal_sign = text.find("=")
if first_curly_bracket < 0 or (0 <= first_equal_sign < first_curly_bracket):
# this is a key=value metadata line
# case one = the options may be preceded with a language
if not allow_title:
if is_jupyter_language(text):
return text, {}
if " " not in text:
return "", parse_key_equal_value(text)
language, options = text.split(" ", 1)
if is_jupyter_language(language):
return language, parse_key_equal_value(options)
return "", parse_key_equal_value(text)
# case two = a title may be before the options
# we split the title into words
if first_equal_sign >= 0:
words = text[:first_equal_sign].split(" ")
# last word is the key before the equal sign!
while words and not words[-1]:
words.pop()
if words:
words.pop()
else:
words = text.split(" ")
# and we remove words on the right that are attributes (they start with '.')
while words and (not words[-1].strip() or words[-1].startswith(".")):
words.pop()
title = " ".join(words)
return title, parse_key_equal_value(text[len(title) :])
# json metadata line
return (
text[:first_curly_bracket].strip(),
relax_json_loads(text[first_curly_bracket:], catch=True),
)
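# Study sketch (added note): the title is everything before the first
# key=value pair; a bare Jupyter language is returned as-is.
if 0:
    assert text_to_metadata('python') == ('python', {})
    assert text_to_metadata('A title key=1', allow_title=True) == ('A title', {'key': 1})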
@language vue
<template>
<body>
<h2>Width and Height Attributes</h2>
<p class="greeting">{{ greeting }}</p>
<p>
The width and height attributes of the img tag define the width and
height of the image:
</p>
<img src="img_girl.jpg" width="500" height="600">
</body>
</template>
<script setup>
import { ref } from 'vue'
const greeting = ref('Hello World!')
</script>
<style>
body {background-color: powderblue;}
h1 {color: blue;}
p {color: red;}
.greeting {
color: red;
font-weight: bold;
}
</style>
<template>
<p class="greeting">{{ greeting 3 }}</p>
</template>
@language html
<!DOCTYPE html>
<
<body>
</html>
@language css
<style>
body {background-color: powderblue;}
h1 {color: blue;}
p {color: red;}
</style>
def metadata_to_text(language_or_title, metadata=None, plain_json=False):
"""Write the cell metadata in the format key=value"""
# Was metadata the first argument?
if metadata is None:
metadata, language_or_title = language_or_title, metadata
metadata = {
key: metadata[key] for key in metadata if key not in _JUPYTEXT_CELL_METADATA
}
text = [language_or_title] if language_or_title else []
if language_or_title is None:
if (
"title" in metadata
and "{" not in metadata["title"]
and "=" not in metadata["title"]
):
text.append(metadata.pop("title"))
if plain_json:
if metadata:
text.append(dumps(metadata))
else:
for key in metadata:
if key == "incorrectly_encoded_metadata":
text.append(metadata[key])
elif metadata[key] is None:
text.append(key)
else:
text.append(f"{key}={dumps(metadata[key])}")
return " ".join(text)
import leo.core.leoGlobals as g
from openai import OpenAI
# client = OpenAI()
# Set your OpenAI API key
client = OpenAI(api_key = "<YOUR KEY HERE>")
@others
g.plugin_signon(__name__)
add_openai_command(c)
@language python
@tabwidth -4
def summarize_and_reformat(content):
"""
Sends the given content to OpenAI to summarize and reformat it using the updated API.
"""
try:
# OpenAI API request
response = client.chat.completions.create(
model="gpt-4o-mini", # Use "gpt-3.5-turbo" or "gpt-4" based on your access
messages=[
{"role": "system", "content":
"You are a helpful assistant. Summarize and reformat the text you are given."
},
{"role": "user", "content": content},
],
temperature=0.7,
max_tokens=2000, # Adjust as needed
)
# Extract and return the response
message = response.choices[0].message.content
g.es(f"Response from openai: {message}")
return message
except Exception as e:
g.es(f"Error while interacting with OpenAI: {e}", color="red")
return None
def process_node_with_openai(c, p):
"""
Summarizes and reformats the content of the current node and creates a new sub-node with the result.
"""
# Get the current node's content
content = p.b.strip()
if not content:
g.es("The node is empty. Nothing to summarize.", color="red")
return
# g.es(f"(content in the pane: {content}")
g.es("Sending content to OpenAI for summarization and reformatting...", color="blue")
# Call the OpenAI function
summarized_content = summarize_and_reformat(content)
if summarized_content:
# Create a new sub-node with the summarized content
new_node = p.insertAsLastChild()
new_node.h = "Summarized and Reformatted Content"
new_node.b = summarized_content
g.es("New sub-node created with summarized and reformatted text.", color="green")
else:
g.es("Failed to create sub-node. See error logs for details.", color="red")
def add_openai_command(c):
"""Add the command to the Leo editor."""
command_name = 'summarize-and-reformat'
c.k.registerCommand(command_name, lambda event: process_node_with_openai(c, c.p))
g.es(f"Command '{command_name}' added to Leo.", color="green")
@language csharp
// Test
if (abc):
a = b;
"""Read notebook cells from their text representation"""
import re
import warnings
from copy import copy
from nbformat.v4.nbbase import new_code_cell, new_markdown_cell, new_raw_cell
from packaging.version import parse
from .doxygen import doxygen_to_markdown
from .languages import _SCRIPT_EXTENSIONS
# Sphinx Gallery is an optional dependency. And we intercept the SyntaxError for #301
try:
from sphinx_gallery import __version__ as sg_version
if parse(sg_version) <= parse("0.7.0"):
from sphinx_gallery.notebook import rst2md
else:
warnings.warn(
f"Sphinx Gallery in version {sg_version} is not supported by Jupytext. "
f"Please use sphinx-gallery<=0.7.0 instead. "
f"If that is an issue, feel free to report it "
f"at https://github.com/mwouts/jupytext/issues, or even better, "
f"prepare a PR to handle the new signature of "
f"sphinx_gallery.notebook.rst2md."
)
rst2md = None
except (ImportError, SyntaxError): # pragma: no cover
rst2md = None
from .cell_metadata import (
is_active,
is_json_metadata,
rmd_options_to_metadata,
text_to_metadata,
)
from .languages import _JUPYTER_LANGUAGES_LOWER_AND_UPPER
from .magics import is_magic, need_explicit_marker, uncomment_magic, unescape_code_start
from .pep8 import pep8_lines_between_cells
from .stringparser import StringParser
_BLANK_LINE = re.compile(r"^\s*$")
_PY_INDENTED = re.compile(r"^\s")
@others
@language python
@tabwidth -4
def uncomment(lines, prefix="#", suffix=""):
"""Remove prefix and space, or only prefix, when possible"""
if prefix:
prefix_and_space = prefix + " "
length_prefix = len(prefix)
length_prefix_and_space = len(prefix_and_space)
lines = [
line[length_prefix_and_space:]
if line.startswith(prefix_and_space)
else (line[length_prefix:] if line.startswith(prefix) else line)
for line in lines
]
if suffix:
space_and_suffix = " " + suffix
length_suffix = len(suffix)
length_space_and_suffix = len(space_and_suffix)
lines = [
line[:-length_space_and_suffix]
if line.endswith(space_and_suffix)
else (line[:-length_suffix] if line.endswith(suffix) else line)
for line in lines
]
return lines
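# Study sketch (added note): the prefix is removed with or without a
# following space; unprefixed lines pass through unchanged.
if 0:
    assert uncomment(['# a', '#b', 'c']) == ['a', 'b', 'c']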
def paragraph_is_fully_commented(lines, comment, main_language):
"""Is the paragraph fully commented?"""
for i, line in enumerate(lines):
if line.startswith(comment):
if line[len(comment) :].lstrip().startswith(comment):
continue
if is_magic(line, main_language):
return False
continue
return i > 0 and _BLANK_LINE.match(line)
return True
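# Study sketch (added note): True only when every line is a comment (and not
# a commented magic); a code line yields a falsy result.
if 0:
    assert paragraph_is_fully_commented(['# a', '# b'], '#', 'python')
    assert not paragraph_is_fully_commented(['# a', 'a = 1'], '#', 'python')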
def next_code_is_indented(lines):
"""Is the next unescaped line indented?"""
for line in lines:
if _BLANK_LINE.match(line):
continue
return _PY_INDENTED.match(line)
return False
def count_lines_to_next_cell(cell_end_marker, next_cell_start, total, explicit_eoc):
"""How many blank lines between end of cell marker and next cell?"""
if cell_end_marker < total:
lines_to_next_cell = next_cell_start - cell_end_marker
if explicit_eoc:
lines_to_next_cell -= 1
if next_cell_start >= total:
lines_to_next_cell += 1
return lines_to_next_cell
return 1
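# Study sketch (added note): an explicit end-of-cell marker consumes one of
# the lines between the two cells.
if 0:
    assert count_lines_to_next_cell(3, 5, 10, False) == 2
    assert count_lines_to_next_cell(3, 5, 10, True) == 1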
def last_two_lines_blank(source):
"""Are the two last lines blank, and not the third last one?"""
if len(source) < 3:
return False
return (
not _BLANK_LINE.match(source[-3])
and _BLANK_LINE.match(source[-2])
and _BLANK_LINE.match(source[-1])
)
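# Study sketch (added note): truthy only for exactly two trailing blank lines.
if 0:
    assert last_two_lines_blank(['a', '', ''])
    assert not last_two_lines_blank(['', '', ''])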
class BaseCellReader:
"""A class that can read notebook cells from their text representation"""
@others
default_comment_magics = None
lines_to_next_cell = 1
start_code_re = None
simple_start_code_re = None
end_code_re = None
# How to make code inactive
comment = ""
# Any specific prefix for lines in markdown cells (like in R spin format?)
markdown_prefix = None
def __init__(self, fmt=None, default_language=None):
"""Create a cell reader with empty content"""
if not fmt:
fmt = {}
self.ext = fmt.get("extension")
self.default_language = default_language or _SCRIPT_EXTENSIONS.get(
self.ext, {}
).get("language", "python")
self.comment_magics = fmt.get("comment_magics", self.default_comment_magics)
self.use_runtools = fmt.get("use_runtools", False)
self.format_version = fmt.get("format_version")
self.metadata = None
self.org_content = []
self.content = []
self.explicit_soc = None
self.explicit_eoc = None
self.cell_type = None
self.language = None
self.cell_metadata_json = fmt.get("cell_metadata_json", False)
self.doxygen_equation_markers = fmt.get("doxygen_equation_markers", False)
def read(self, lines):
"""Read one cell from the given lines, and return the cell,
plus the position of the next cell
"""
# Do we have an explicit code marker on the first line?
self.metadata_and_language_from_option_line(lines[0])
if self.metadata and "language" in self.metadata:
self.language = self.metadata.pop("language")
# Parse cell till its end and set content, lines_to_next_cell
pos_next_cell = self.find_cell_content(lines)
if self.cell_type == "code":
new_cell = new_code_cell
elif self.cell_type == "markdown":
new_cell = new_markdown_cell
else:
new_cell = new_raw_cell
if not self.metadata:
self.metadata = {}
if self.ext == ".py":
expected_blank_lines = pep8_lines_between_cells(
self.org_content or [""], lines[pos_next_cell:], self.ext
)
else:
expected_blank_lines = 1
if self.lines_to_next_cell != expected_blank_lines:
self.metadata["lines_to_next_cell"] = self.lines_to_next_cell
if self.language:
self.metadata["language"] = self.language
return (
new_cell(source="\n".join(self.content), metadata=self.metadata),
pos_next_cell,
)
def metadata_and_language_from_option_line(self, line):
"""Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary."""
if self.start_code_re.match(line):
self.language, self.metadata = self.options_to_metadata(
self.start_code_re.findall(line)[0]
)
def options_to_metadata(self, options):
"""Return language (str) and metadata (dict) from the option line"""
raise NotImplementedError("Option parsing must be implemented in a sub class")
def find_cell_end(self, lines):
"""Return position of end of cell marker, and position
of first line after cell"""
raise NotImplementedError("This method must be implemented in a sub class")
def find_cell_content(self, lines):
"""Parse cell till its end and set content, lines_to_next_cell.
Return the position of next cell start"""
cell_end_marker, next_cell_start, self.explicit_eoc = self.find_cell_end(lines)
# Metadata to dict
if self.metadata is None:
cell_start = 0
self.metadata = {}
else:
cell_start = 1
# Cell content
source = lines[cell_start:cell_end_marker]
self.org_content = copy(source)
# Exactly two empty lines at the end of cell (caused by PEP8)?
if self.ext == ".py" and self.explicit_eoc:
if last_two_lines_blank(source):
source = source[:-2]
lines_to_end_of_cell_marker = 2
else:
lines_to_end_of_cell_marker = 0
pep8_lines = pep8_lines_between_cells(
source, lines[cell_end_marker:], self.ext
)
if lines_to_end_of_cell_marker != (0 if pep8_lines == 1 else 2):
self.metadata[
"lines_to_end_of_cell_marker"
] = lines_to_end_of_cell_marker
# Uncomment content
self.explicit_soc = cell_start > 0
self.content = self.extract_content(source)
# Is this an inactive cell?
if self.cell_type == "code":
if not is_active(".ipynb", self.metadata):
if self.metadata.get("active") == "":
del self.metadata["active"]
self.cell_type = "raw"
elif self.ext in [".md", ".markdown"] and not self.language:
# Markdown files in version >= 1.3 represent code chunks with no language as Markdown cells
if self.format_version not in ["1.0", "1.1"]:
self.cell_type = "markdown"
self.explicit_eoc = False
cell_end_marker += 1
self.content = lines[:cell_end_marker]
# Previous versions mapped those to raw cells
else:
self.cell_type = "raw"
# Explicit end of cell marker?
if (
next_cell_start + 1 < len(lines)
and _BLANK_LINE.match(lines[next_cell_start])
and not _BLANK_LINE.match(lines[next_cell_start + 1])
):
next_cell_start += 1
elif (
self.explicit_eoc
and next_cell_start + 2 < len(lines)
and _BLANK_LINE.match(lines[next_cell_start])
and _BLANK_LINE.match(lines[next_cell_start + 1])
and not _BLANK_LINE.match(lines[next_cell_start + 2])
):
next_cell_start += 2
self.lines_to_next_cell = count_lines_to_next_cell(
cell_end_marker, next_cell_start, len(lines), self.explicit_eoc
)
return next_cell_start
def uncomment_code_and_magics(self, lines):
"""Uncomment code and possibly commented magic commands"""
raise NotImplementedError("This method must be implemented in a sub class")
def extract_content(self, lines):
# Code cells with just a multiline string become Markdown cells
if self.ext == ".py" and not is_active(
self.ext, self.metadata, self.cell_type == "code"
):
content = "\n".join(lines).strip()
for prefix in [""] if self.ext != ".py" else ["", "r", "R"]:
for triple_quote in ['"""', "'''"]:
left = prefix + triple_quote
right = triple_quote
if (
content.startswith(left)
and content.endswith(right)
and len(content) >= len(left + right)
):
content = content[len(left) : -len(right)]
# Trim first/last line return
if content.startswith("\n"):
content = content[1:]
left = left + "\n"
if content.endswith("\n"):
content = content[:-1]
right = "\n" + right
if not prefix:
if len(left) == len(right) == 4:
self.metadata["cell_marker"] = left[:3]
elif len(left[1:]) == len(right) == 4:
self.metadata["cell_marker"] = left[:4]
else:
self.metadata["cell_marker"] = left + "," + right
return content.splitlines()
if not is_active(self.ext, self.metadata) or (
"active" not in self.metadata
and self.language
and self.language != self.default_language
):
return uncomment(
lines, self.comment if self.ext not in [".r", ".R"] else "#"
)
return self.uncomment_code_and_magics(lines)
class MarkdownCellReader(BaseCellReader):
"""Read notebook cells from Markdown documents"""
@others
comment = ""
start_code_re = re.compile(
r"^```(`*)(\s*)({})($|\s.*$)".format(
"|".join(_JUPYTER_LANGUAGES_LOWER_AND_UPPER).replace("+", "\\+")
)
)
non_jupyter_code_re = re.compile(r"^```")
end_code_re = re.compile(r"^```\s*$")
start_region_re = re.compile(r"^<!--\s*#(region|markdown|md|raw)(.*)-->\s*$")
end_region_re = None
default_comment_magics = False
def __init__(self, fmt=None, default_language=None):
super().__init__(fmt, default_language)
self.split_at_heading = (fmt or {}).get("split_at_heading", False)
self.in_region = False
self.in_raw = False
if self.format_version in ["1.0", "1.1"] and self.ext != ".Rmd":
# Restore the pattern used in Markdown <= 1.1
self.start_code_re = re.compile(r"^```(.*)")
self.non_jupyter_code_re = re.compile(r"^```\{")
def metadata_and_language_from_option_line(self, line):
match_region = self.start_region_re.match(line)
if match_region:
self.in_region = True
groups = match_region.groups()
region_name = groups[0]
self.end_region_re = re.compile(rf"^<!--\s*#end{region_name}\s*-->\s*$")
self.cell_metadata_json = self.cell_metadata_json or is_json_metadata(
groups[1]
)
title, self.metadata = text_to_metadata(groups[1], allow_title=True)
if region_name == "raw":
self.cell_type = "raw"
else:
self.cell_type = "markdown"
if title:
self.metadata["title"] = title
if region_name in ["markdown", "md"]:
self.metadata["region_name"] = region_name
elif self.start_code_re.match(line):
self.language, self.metadata = self.options_to_metadata(
self.start_code_re.findall(line)[0]
)
# Cells with a .noeval attribute are markdown cells #347
if self.metadata.get(".noeval", "") is None:
self.cell_type = "markdown"
self.metadata = {}
self.language = None
def options_to_metadata(self, options):
if isinstance(options, tuple):
self.end_code_re = re.compile("```" + options[0])
options = " ".join(options[1:])
else:
self.end_code_re = re.compile(r"^```\s*$")
self.cell_metadata_json = self.cell_metadata_json or is_json_metadata(options)
return text_to_metadata(options)
def find_cell_end(self, lines):
"""Return position of end of cell marker, and position
of first line after cell"""
if self.in_region:
for i, line in enumerate(lines):
if self.end_region_re.match(line):
return i, i + 1, True
elif self.metadata is None:
# default markdown: (last) two consecutive blank lines, except when in code blocks
self.cell_type = "markdown"
prev_blank = 0
in_explicit_code_block = False
in_indented_code_block = False
for i, line in enumerate(lines):
if in_explicit_code_block and self.end_code_re.match(line):
in_explicit_code_block = False
continue
if (
prev_blank
and line.startswith(" ")
and not _BLANK_LINE.match(line)
):
in_indented_code_block = True
prev_blank = 0
continue
if (
in_indented_code_block
and not _BLANK_LINE.match(line)
and not line.startswith(" ")
):
in_indented_code_block = False
if in_indented_code_block or in_explicit_code_block:
continue
if self.start_region_re.match(line):
if i > 1 and prev_blank:
return i - 1, i, False
return i, i, False
if self.start_code_re.match(line):
if line.startswith("```{bibliography}"):
in_explicit_code_block = True
prev_blank = 0
continue
# Cells with a .noeval attribute are markdown cells #347
_, metadata = self.options_to_metadata(
self.start_code_re.findall(line)[0]
)
if metadata.get(".noeval", "") is None:
in_explicit_code_block = True
prev_blank = 0
continue
if i > 1 and prev_blank:
return i - 1, i, False
return i, i, False
if self.non_jupyter_code_re.match(line):
if prev_blank >= 2:
return i - 2, i, True
in_explicit_code_block = True
prev_blank = 0
continue
if self.split_at_heading and line.startswith("#") and prev_blank >= 1:
return i - 1, i, False
if _BLANK_LINE.match(lines[i]):
prev_blank += 1
elif prev_blank >= 2:
return i - 2, i, True
else:
prev_blank = 0
else:
self.cell_type = "code"
# At some point we could remove the below, in which we make sure not to break language strings
# into multiple cells (#419). Indeed, now that the markdown cell uses one extra backtick (#712)
# we should not have the issue any more
parser = StringParser(self.language or self.default_language)
for i, line in enumerate(lines):
# skip cell header
if i == 0:
continue
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
if self.end_code_re.match(line):
return i, i + 1, True
# End not found
return len(lines), len(lines), False
def uncomment_code_and_magics(self, lines):
if self.cell_type == "code" and self.comment_magics:
lines = uncomment_magic(lines, self.language)
if self.cell_type == "markdown" and self.doxygen_equation_markers:
lines = doxygen_to_markdown("\n".join(lines)).splitlines()
return lines
class RMarkdownCellReader(MarkdownCellReader):
"""Read notebook cells from R Markdown notebooks"""
@others
comment = ""
start_code_re = re.compile(r"^```{(.*)}\s*$")
non_jupyter_code_re = re.compile(r"^```([^\{]|\s*$)")
default_language = "R"
default_comment_magics = True
def options_to_metadata(self, options):
return rmd_options_to_metadata(options, self.use_runtools)
def uncomment_code_and_magics(self, lines):
if (
self.cell_type == "code"
and self.comment_magics
and is_active(self.ext, self.metadata)
):
uncomment_magic(lines, self.language or self.default_language)
return lines
class ScriptCellReader(BaseCellReader): # pylint: disable=W0223
"""Read notebook cells from scripts
(common base for R and Python scripts)"""
@others
def uncomment_code_and_magics(self, lines):
if self.cell_type == "code" or self.comment != "#'":
if self.comment_magics:
if is_active(self.ext, self.metadata):
uncomment_magic(
lines,
self.language or self.default_language,
explicitly_code=self.explicit_soc,
)
if (
self.cell_type == "code"
and not self.explicit_soc
and need_explicit_marker(
lines, self.language or self.default_language
)
):
self.metadata["comment_questions"] = False
else:
lines = uncomment(lines)
if self.default_language == "go" and self.language is None:
lines = [
re.sub(r"^((//\s*)*)(//\s*gonb:%%)", r"\1%%", line) for line in lines
]
if self.cell_type == "code":
return unescape_code_start(
lines, self.ext, self.language or self.default_language
)
return uncomment(
lines, self.markdown_prefix or self.comment, self.comment_suffix
)
class RScriptCellReader(ScriptCellReader):
"""Read notebook cells from R scripts written according
to the knitr-spin syntax"""
@others
comment = "#'"
comment_suffix = ""
markdown_prefix = "#'"
default_language = "R"
start_code_re = re.compile(r"^#\+(.*)\s*$")
default_comment_magics = True
def options_to_metadata(self, options):
return rmd_options_to_metadata("r " + options, self.use_runtools)
def find_cell_end(self, lines):
"""Return position of end of cell marker, and position
of first line after cell"""
if self.metadata is None and lines[0].startswith("#'"):
self.cell_type = "markdown"
for i, line in enumerate(lines):
if not line.startswith("#'"):
if _BLANK_LINE.match(line):
return i, i + 1, False
return i, i, False
return len(lines), len(lines), False
if self.metadata and "cell_type" in self.metadata:
self.cell_type = self.metadata.pop("cell_type")
else:
self.cell_type = "code"
parser = StringParser(self.language or self.default_language)
for i, line in enumerate(lines):
# skip cell header
if self.metadata is not None and i == 0:
continue
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
if self.start_code_re.match(line) or (
self.markdown_prefix and line.startswith(self.markdown_prefix)
):
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
if i > 1 and _BLANK_LINE.match(lines[i - 2]):
return i - 2, i, False
return i - 1, i, False
return i, i, False
if _BLANK_LINE.match(line):
if not next_code_is_indented(lines[i:]):
if i > 0:
return i, i + 1, False
if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
return 1, 1, False
return 1, 2, False
return len(lines), len(lines), False
class LightScriptCellReader(ScriptCellReader):
"""Read notebook cells from plain Python or Julia files. Cells
are identified by line breaks, unless they start with an
explicit marker '# +'"""
@others
default_comment_magics = True
cell_marker_start = None
cell_marker_end = None
def __init__(self, fmt=None, default_language=None):
super().__init__(fmt, default_language)
self.ext = self.ext or ".py"
script = _SCRIPT_EXTENSIONS[self.ext]
self.default_language = default_language or script["language"]
self.comment = script["comment"]
self.comment_suffix = script.get("comment_suffix", "")
self.ignore_end_marker = True
self.explicit_end_marker_required = False
if (
fmt
and fmt.get("format_name", "light") == "light"
and "cell_markers" in fmt
and fmt["cell_markers"] != "+,-"
):
self.cell_marker_start, self.cell_marker_end = fmt["cell_markers"].split(
",", 1
)
self.start_code_re = re.compile(
"^"
+ re.escape(self.comment)
+ r"\s*"
+ self.cell_marker_start
+ r"(.*)$"
)
self.end_code_re = re.compile(
"^" + re.escape(self.comment) + r"\s*" + self.cell_marker_end + r"\s*$"
)
else:
self.start_code_re = re.compile(
"^" + re.escape(self.comment) + r"\s*\+(.*)$"
)
def metadata_and_language_from_option_line(self, line):
if self.start_code_re.match(line):
# Remove the OCAML suffix
if self.comment_suffix:
if line.endswith(" " + self.comment_suffix):
line = line[: -len(" " + self.comment_suffix)]
elif line.endswith(self.comment_suffix):
line = line[: -len(self.comment_suffix)]
# We want to parse inner most regions as cells.
# Thus, if we find another region start before the end for this region,
# we will have to ignore the metadata that we found here, and move on to the next cell.
groups = self.start_code_re.match(line).groups()
self.language, self.metadata = self.options_to_metadata(groups[0])
self.ignore_end_marker = False
if self.cell_marker_start:
self.explicit_end_marker_required = True
elif self.simple_start_code_re and self.simple_start_code_re.match(line):
self.metadata = {}
self.ignore_end_marker = False
elif self.cell_marker_end and self.end_code_re.match(line):
self.metadata = None
self.cell_type = "code"
def options_to_metadata(self, options):
self.cell_metadata_json = self.cell_metadata_json or is_json_metadata(options)
title, metadata = text_to_metadata(options, allow_title=True)
# Cell type
for cell_type in ["markdown", "raw", "md"]:
code = f"[{cell_type}]"
if code in title:
title = title.replace(code, "").strip()
metadata["cell_type"] = cell_type
if cell_type == "md":
metadata["region_name"] = cell_type
metadata["cell_type"] = "markdown"
break
# Spyder has sub cells
cell_depth = 0
while title.startswith("%"):
cell_depth += 1
title = title[1:]
if cell_depth:
metadata["cell_depth"] = cell_depth
title = title.strip()
if title:
metadata["title"] = title
return None, metadata
def find_cell_end(self, lines):
"""Return position of end of cell marker, and position of first line after cell"""
if (
self.metadata is None
and not (self.cell_marker_end and self.end_code_re.match(lines[0]))
and paragraph_is_fully_commented(lines, self.comment, self.default_language)
):
self.cell_type = "markdown"
for i, line in enumerate(lines):
if _BLANK_LINE.match(line):
return i, i + 1, False
return len(lines), len(lines), False
if self.metadata is None:
self.end_code_re = None
elif not self.cell_marker_end:
end_of_cell = self.metadata.get("endofcell", "-")
self.end_code_re = re.compile(
"^" + re.escape(self.comment) + " " + end_of_cell + r"\s*$"
)
return self.find_region_end(lines)
def find_region_end(self, lines):
"""Find the end of the region started with start and end markers"""
if self.metadata and "cell_type" in self.metadata:
self.cell_type = self.metadata.pop("cell_type")
else:
self.cell_type = "code"
parser = StringParser(self.language or self.default_language)
for i, line in enumerate(lines):
# skip cell header
if self.metadata is not None and i == 0:
continue
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
# New code region
# Simple code pattern in LightScripts must be preceded with a blank line
if self.start_code_re.match(line) or (
self.simple_start_code_re
and self.simple_start_code_re.match(line)
and (
self.cell_marker_start or i == 0 or _BLANK_LINE.match(lines[i - 1])
)
):
if self.explicit_end_marker_required:
# Metadata here was conditioned on finding an explicit end marker
# before the next start marker. So we dismiss it.
self.metadata = None
self.language = None
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
if i > 1 and _BLANK_LINE.match(lines[i - 2]):
return i - 2, i, False
return i - 1, i, False
return i, i, False
if not self.ignore_end_marker and self.end_code_re:
if self.end_code_re.match(line):
return i, i + 1, True
elif _BLANK_LINE.match(line):
if not next_code_is_indented(lines[i:]):
if i > 0:
return i, i + 1, False
if len(lines) > 1 and not _BLANK_LINE.match(lines[1]):
return 1, 1, False
return 1, 2, False
return len(lines), len(lines), False
class DoublePercentScriptCellReader(LightScriptCellReader):
"""Read notebook cells from Spyder/VScode scripts (#59)"""
@others
default_comment_magics = True
def __init__(self, fmt, default_language=None):
LightScriptCellReader.__init__(self, fmt, default_language)
script = _SCRIPT_EXTENSIONS[self.ext]
self.default_language = default_language or script["language"]
self.comment = script["comment"]
self.comment_suffix = script.get("comment_suffix", "")
self.start_code_re = re.compile(
rf"^\s*{re.escape(self.comment)}\s*%%(%*)\s(.*)$"
)
self.alternative_start_code_re = re.compile(
r"^\s*{}\s*(%%|<codecell>|In\[[0-9 ]*\]:?)\s*$".format(
re.escape(self.comment)
)
)
self.explicit_soc = True
def metadata_and_language_from_option_line(self, line):
"""Parse code options on the given line. When a start of a code cell
is found, self.metadata is set to a dictionary."""
if self.start_code_re.match(line):
line = uncomment([line], self.comment, self.comment_suffix)[0]
self.language, self.metadata = self.options_to_metadata(
line[line.find("%%") + 2 :]
)
else:
self.metadata = {}
def find_cell_content(self, lines):
"""Parse cell till its end and set content, lines_to_next_cell.
Return the position of next cell start"""
cell_end_marker, next_cell_start, explicit_eoc = self.find_cell_end(lines)
# Metadata to dict
if self.start_code_re.match(lines[0]) or self.alternative_start_code_re.match(
lines[0]
):
cell_start = 1
else:
cell_start = 0
# Cell content
source = lines[cell_start:cell_end_marker]
self.org_content = copy(source)
self.content = self.extract_content(source)
self.lines_to_next_cell = count_lines_to_next_cell(
cell_end_marker, next_cell_start, len(lines), explicit_eoc
)
return next_cell_start
def find_cell_end(self, lines):
"""Return position of end of cell marker, and position
of first line after cell"""
if self.metadata and "cell_type" in self.metadata:
self.cell_type = self.metadata.pop("cell_type")
elif not is_active(".ipynb", self.metadata):
if self.metadata.get("active") == "":
del self.metadata["active"]
self.cell_type = "raw"
self.comment = ""
else:
self.cell_type = "code"
next_cell = len(lines)
parser = StringParser(self.language or self.default_language)
for i, line in enumerate(lines):
if parser.is_quoted():
parser.read_line(line)
continue
parser.read_line(line)
if i > 0 and (
self.start_code_re.match(line)
or self.alternative_start_code_re.match(line)
):
next_cell = i
break
if last_two_lines_blank(lines[:next_cell]):
return next_cell - 2, next_cell, False
if next_cell > 0 and _BLANK_LINE.match(lines[next_cell - 1]):
return next_cell - 1, next_cell, False
return next_cell, next_cell, False
@language md
# Loading and Analyzing Data
# ==========================
*Italics* and **bold**.
More markdown. nxy
Here is some Python code::
print('Hi')
class HydrogenCellReader(DoublePercentScriptCellReader):
"""Read notebook cells from Hydrogen scripts (#59)"""
default_comment_magics = False
class SphinxGalleryScriptCellReader(ScriptCellReader): # pylint: disable=W0223
"""Read notebook cells from Sphinx Gallery scripts (#80)"""
@others
comment = "#"
default_language = "python"
default_comment_magics = True
twenty_hash = re.compile(r"^#( |)#{19,}\s*$")
default_markdown_cell_marker = "#" * 79
markdown_marker = None
def __init__(self, fmt=None, default_language="python"):
super().__init__(fmt, default_language)
self.ext = ".py"
self.rst2md = (fmt or {}).get("rst2md", False)
def start_of_new_markdown_cell(self, line):
"""Does this line starts a new markdown cell?
Then, return the cell marker"""
for empty_markdown_cell in ['""', "''"]:
if line == empty_markdown_cell:
return empty_markdown_cell
for triple_quote in ['"""', "'''"]:
if line.startswith(triple_quote):
return triple_quote
if self.twenty_hash.match(line):
return line
return None
def metadata_and_language_from_option_line(self, line):
self.markdown_marker = self.start_of_new_markdown_cell(line)
if self.markdown_marker:
self.cell_type = "markdown"
if self.markdown_marker != self.default_markdown_cell_marker:
self.metadata = {"cell_marker": self.markdown_marker}
else:
self.cell_type = "code"
def find_cell_end(self, lines):
"""Return position of end of cell, and position
of first line after cell, and whether there was an
explicit end of cell marker"""
if self.cell_type == "markdown":
# Empty cell "" or ''
if len(self.markdown_marker) <= 2:
if len(lines) == 1 or _BLANK_LINE.match(lines[1]):
return 0, 2, True
return 0, 1, True
# Multi-line comment with triple quote
if len(self.markdown_marker) == 3:
for i, line in enumerate(lines):
if (
i > 0 or line.strip() != self.markdown_marker
) and line.rstrip().endswith(self.markdown_marker):
explicit_end_of_cell_marker = (
line.strip() == self.markdown_marker
)
if explicit_end_of_cell_marker:
end_of_cell = i
else:
end_of_cell = i + 1
if len(lines) <= i + 1 or _BLANK_LINE.match(lines[i + 1]):
return end_of_cell, i + 2, explicit_end_of_cell_marker
return end_of_cell, i + 1, explicit_end_of_cell_marker
else:
# 20 # or more
for i, line in enumerate(lines[1:], 1):
if not line.startswith(self.comment):
if _BLANK_LINE.match(line):
return i, i + 1, False
return i, i, False
elif self.cell_type == "code":
parser = StringParser("python")
for i, line in enumerate(lines):
if parser.is_quoted():
parser.read_line(line)
continue
if self.start_of_new_markdown_cell(line):
if i > 0 and _BLANK_LINE.match(lines[i - 1]):
return i - 1, i, False
return i, i, False
parser.read_line(line)
return len(lines), len(lines), False
def find_cell_content(self, lines):
"""Parse cell till its end and set content, lines_to_next_cell.
Return the position of next cell start"""
cell_end_marker, next_cell_start, explicit_eoc = self.find_cell_end(lines)
# Metadata to dict
cell_start = 0
if self.cell_type == "markdown":
if self.markdown_marker in ['"""', "'''"]:
# Remove the triple quotes
if lines[0].strip() == self.markdown_marker:
cell_start = 1
else:
lines[0] = lines[0][3:]
if not explicit_eoc:
last = lines[cell_end_marker - 1]
lines[cell_end_marker - 1] = last[
: last.rfind(self.markdown_marker)
]
if self.twenty_hash.match(self.markdown_marker):
cell_start = 1
else:
self.metadata = {}
# Cell content
source = lines[cell_start:cell_end_marker]
self.org_content = copy(source)
if self.cell_type == "code" and self.comment_magics:
uncomment_magic(source, self.language or self.default_language)
if self.cell_type == "markdown" and source:
if self.markdown_marker.startswith(self.comment):
source = uncomment(source, self.comment)
if self.rst2md:
if rst2md:
source = rst2md("\n".join(source)).splitlines()
else:
raise ImportError(
"Could not import rst2md from sphinx_gallery.notebook"
) # pragma: no cover
self.content = source
self.lines_to_next_cell = count_lines_to_next_cell(
cell_end_marker, next_cell_start, len(lines), explicit_eoc
)
return next_cell_start
"""Export notebook cells as text"""
import re
import warnings
from copy import copy
from .cell_metadata import (
_IGNORE_CELL_METADATA,
is_active,
metadata_to_double_percent_options,
metadata_to_rmd_options,
metadata_to_text,
)
from .cell_reader import LightScriptCellReader, MarkdownCellReader, RMarkdownCellReader
from .doxygen import markdown_to_doxygen
from .languages import _SCRIPT_EXTENSIONS, cell_language, comment_lines, same_language
from .magics import comment_magic, escape_code_start, need_explicit_marker
from .metadata_filter import filter_metadata
from .pep8 import pep8_lines_between_cells
@others
@language python
@tabwidth -4
def cell_source(cell):
"""Return the source of the current cell, as an array of lines"""
source = cell.source
if source == "":
return [""]
if source.endswith("\n"):
return source.splitlines() + [""]
return source.splitlines()
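# Study sketch (added note): a trailing newline becomes an extra empty line,
# so that writers can round-trip it.
if 0:
    from nbformat.v4.nbbase import new_code_cell
    assert cell_source(new_code_cell('a\n')) == ['a', '']
    assert cell_source(new_code_cell('a')) == ['a']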
def three_backticks_or_more(lines):
"""Return a string with enough backticks to encapsulate the given code cell in Markdown
cf. https://github.com/mwouts/jupytext/issues/712"""
code_cell_delimiter = "```"
for line in lines:
if not line.startswith(code_cell_delimiter):
continue
for char in line[len(code_cell_delimiter) :]:
if char != "`":
break
code_cell_delimiter += "`"
code_cell_delimiter += "`"
return code_cell_delimiter
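# Study sketch (added note): the fence grows to one backtick more than the
# longest fence found inside the cell.
if 0:
    assert three_backticks_or_more(['```python', 'code']) == '````'
    assert three_backticks_or_more(['print(1)']) == '```'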