{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": false
},
"outputs": [],
"source": [
"#hide\n",
"#default_exp export2html\n",
"#default_cls_lvl 3\n",
"from nbdev.showdoc import show_doc"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": false
},
"outputs": [],
"source": [
"#export\n",
"from nbdev.imports import *\n",
"from nbdev.sync import *\n",
"from nbdev.export import *\n",
"from nbdev.export import _mk_flag_re\n",
"from nbdev.showdoc import *\n",
"from nbdev.template import *\n",
"from fastcore.foundation import *\n",
"\n",
"from html.parser import HTMLParser\n",
"from nbconvert.preprocessors import ExecutePreprocessor, Preprocessor\n",
"from nbconvert import HTMLExporter,MarkdownExporter\n",
"import traitlets"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Convert to html\n",
"\n",
"> The functions that transform the dev notebooks in the documentation of the library\n",
"\n",
"- toc: true"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The most important function defined in this module is `notebook2html`, so you may want to jump to it before scrolling though the rest, which explain the details behind the scenes of the conversion from notebooks to the html documentation. The main things to remember are:\n",
"- put `#hide` at the top of any cell you want to completely hide in the docs\n",
"- use the hide input [jupyter extension](https://github.com/ipython-contrib/jupyter_contrib_nbextensions) to hide the input of some cells (by default all `show_doc` cells have that marker added)\n",
"- you can define some jekyll metadata in the markdown cell with the title, see `get_metadata`\n",
"- use backsticks for terms you want automatic links to be found, but use `` and `
` when you have homonyms and don't want those links\n",
"- you can define the default toc level of classes with `#default_cls_lvl` followed by a number (default is 2)\n",
"- you can add jekyll warnings, important or note banners with appropriate block quotes (see `add_jekyll_notes`)\n",
"- put any images you want to use in the images folder of your notebook folder, they will be automatically copied over to the docs folder\n",
"- put `#hide_input` at the top of a cell if you don't want code to be shown in the docs\n",
" - cells containing `#export` or `show_doc` have their code hidden automatically\n",
"- put `#hide_output` at the top of a cell if you don't want output to be shown in the docs\n",
"- use `#collapse_input` or `#collapse_output` to include code or output in the docs under a collapsable element"
]
},
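{
"cell_type": "markdown",
"metadata": {},
"source": [
"For instance, a cell written like this (the `my_func` call is only a hypothetical illustration, not a function from this module) will appear in the docs with its input hidden and only its output visible:\n",
"```\n",
"#hide_input\n",
"my_func(1, 2)\n",
"```\n",
"whereas starting the cell with `#hide` instead would remove it from the docs entirely."
]
},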
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Preprocessing notebook"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Cell processors"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"class HTMLParseAttrs(HTMLParser):\n",
" \"Simple HTML parser which stores any attributes in `attrs` dict\"\n",
" def handle_starttag(self, tag, attrs): self.tag,self.attrs = tag,dict(attrs)\n",
"\n",
" def attrs2str(self):\n",
" \"Attrs as string\"\n",
" return ' '.join([f'{k}=\"{v}\"' for k,v in self.attrs.items()])\n",
"\n",
" def show(self):\n",
" \"Tag with updated attrs\"\n",
" return f'<{self.tag} {self.attrs2str()} />'\n",
"\n",
" def __call__(self, s):\n",
" \"Parse `s` and store attrs\"\n",
" self.feed(s)\n",
" return self.attrs"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"h = HTMLParseAttrs()\n",
"t = h('')\n",
"test_eq(t['width'], '700')\n",
"test_eq(t['src' ], 'src')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"t['width'] = '600'\n",
"test_eq(h.show(), '')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"t['max-width'] = t.pop('width')\n",
"test_eq(h.show(), '')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following functions are applied on individual cells as a preprocessing step before the conversion to html."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def remove_widget_state(cell):\n",
" \"Remove widgets in the output of `cells`\"\n",
" if cell['cell_type'] == 'code' and 'outputs' in cell:\n",
" cell['outputs'] = [l for l in cell['outputs']\n",
" if not ('data' in l and 'application/vnd.jupyter.widget-view+json' in l.data)]\n",
" return cell"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Those outputs usually can't be rendered properly in html."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"# Note: `_re_show_doc` will catch show_doc even if it's commented out etc\n",
"_re_show_doc = re.compile(r\"\"\"\n",
"# Catches any show_doc and get the first argument in group 1\n",
"^\\s*show_doc # line can start with any amount of whitespace followed by show_doc\n",
"\\s*\\(\\s* # Any number of whitespace, opening (, any number of whitespace\n",
"([^,\\)\\s]*) # Catching group for any character but a comma, a closing ) or a whitespace\n",
"[,\\)\\s] # A comma, a closing ) or a whitespace\n",
"\"\"\", re.MULTILINE | re.VERBOSE)\n",
"\n",
"_re_hide_input = [\n",
" _mk_flag_re('export', (0,1), \"Cell that has `#export\"),\n",
" _mk_flag_re('(hide_input|hide-input)', 0, \"Cell that has `#hide_input` or `#hide-input`\")]\n",
"\n",
"_re_hide_output = _mk_flag_re('(hide_output|hide-output)', 0, \"Cell that has `#hide_output` or `#hide-output`\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def upd_metadata(cell, key, value=True):\n",
" \"Sets `key` to `value` on the `metadata` of `cell` without replacing metadata\"\n",
" cell.setdefault('metadata',{})[key] = value"
]
},
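{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick check (added here for illustration) that `upd_metadata` keeps any metadata keys already present on the cell:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cell = {'metadata': {'foo': 1}}\n",
"upd_metadata(cell, 'hide_input')\n",
"test_eq(cell['metadata'], {'foo': 1, 'hide_input': True})"
]
},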
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def hide_cells(cell):\n",
" \"Hide inputs of `cell` that need to be hidden\"\n",
" if check_re_multi(cell, [_re_show_doc, *_re_hide_input]): upd_metadata(cell, 'hide_input')\n",
" elif check_re(cell, _re_hide_output): upd_metadata(cell, 'hide_output')\n",
" return cell"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This concerns all the cells with `#export` or `#hide_input` flags and all the cells containing a `show_doc` for a function or class."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for source in ['show_doc(read_nb)', '# export\\nfrom local.core import *', '# hide_input\\n2+2',\n",
" 'line1\\n show_doc (read_nb) \\nline3', '# export with.mod\\nfrom local.core import *']:\n",
" cell = {'cell_type': 'code', 'source': source}\n",
" cell1 = hide_cells(cell.copy())\n",
" assert 'metadata' in cell1\n",
" assert 'hide_input' in cell1['metadata']\n",
" assert cell1['metadata']['hide_input']\n",
"\n",
"flag = '# exports'\n",
"cell = {'cell_type': 'code', 'source': f'{flag}\\nfrom local.core2 import *'}\n",
"test_eq(hide_cells(cell.copy()), cell)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This concerns all the cells with `#hide_output`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for source in ['# hide-output\\nfrom local.core import *', '# hide_output\\n2+2']:\n",
" cell = {'cell_type': 'code', 'source': source}\n",
" cell1 = hide_cells(cell.copy())\n",
" assert 'metadata' in cell1\n",
" assert 'hide_output' in cell1['metadata']\n",
" assert cell1['metadata']['hide_output']\n",
"\n",
"cell = {'cell_type': 'code', 'source': '# hide-outputs\\nfrom local.core import *'}\n",
"test_eq(hide_cells(cell.copy()), cell)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def clean_exports(cell):\n",
" \"Remove all flags from code `cell`s\"\n",
" if cell['cell_type'] == 'code': cell['source'] = split_flags_and_code(cell, str)[1]\n",
" return cell"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The rest of the cell is displayed without any modification."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"flag = '# exports'\n",
"cell = {'cell_type': 'code', 'source': f'{flag}\\nfrom local.core import *'}\n",
"test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': 'from local.core import *'})\n",
"cell['cell_type'] = 'markdown'\n",
"test_eq(clean_exports(cell.copy()), cell)\n",
"cell = {'cell_type': 'code', 'source': f'{flag} core\\nfrom local.core import *'}\n",
"test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': 'from local.core import *'})\n",
" \n",
"cell = {'cell_type': 'code', 'source': f'# comment \\n# exports\\nprint(\"something\")'}\n",
"test_eq(clean_exports(cell.copy()), {'cell_type': 'code', 'source': '# exports\\nprint(\"something\")'})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def treat_backticks(cell):\n",
" \"Add links to backticks words in `cell`\"\n",
" if cell['cell_type'] == 'markdown': cell['source'] = add_doc_links(cell['source'])\n",
" return cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cell = {'cell_type': 'markdown', 'source': 'This is a `DocsTestClass`'}\n",
"test_eq(treat_backticks(cell), {'cell_type': 'markdown',\n",
" 'source': 'This is a [`DocsTestClass`](/export.html#DocsTestClass)'})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_nb_link = re.compile(r\"\"\"\n",
"# Catches any link to a local notebook and keeps the title in group 1, the link without .ipynb in group 2\n",
"\\[ # Opening [\n",
"([^\\]]*) # Catching group for any character except ]\n",
"\\]\\( # Closing ], opening (\n",
"([^http] # Catching group that must not begin by html (local notebook)\n",
"[^\\)]*) # and containing anything but )\n",
".ipynb\\) # .ipynb and closing )\n",
"\"\"\", re.VERBOSE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_block_notes = re.compile(r\"\"\"\n",
"# Catches any pattern > Title: content with title in group 1 and content in group 2\n",
"^\\s*>\\s* # > followed by any number of whitespace\n",
"([^:]*) # Catching group for any character but :\n",
":\\s* # : then any number of whitespace\n",
"([^\\n]*) # Catching group for anything but a new line character\n",
"(?:\\n|$) # Non-catching group for either a new line or the end of the text\n",
"\"\"\", re.VERBOSE | re.MULTILINE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _to_html(text):\n",
" return text.replace(\"'\", \"’\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def add_jekyll_notes(cell):\n",
" \"Convert block quotes to jekyll notes in `cell`\"\n",
" styles = Config().get('jekyll_styles', 'note,warning,tip,important').split(',')\n",
" def _inner(m):\n",
" title,text = m.groups()\n",
" if title.lower() not in styles: return f\"> {title}:{text}\"\n",
" return '{% include '+title.lower()+\".html content=\\'\"+_to_html(text)+\"\\' %}\"\n",
" if cell['cell_type'] == 'markdown':\n",
" cell['source'] = _re_block_notes.sub(_inner, cell['source'])\n",
" return cell"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Supported styles are `Warning`, `Note` `Tip` and `Important`:\n",
"\n",
"Typing `> Warning: There will be no second warning!` will render in the docs:\n",
"\n",
"> Warning: There will be no second warning!\n",
"\n",
"Typing `> Important: Pay attention! It's important.` will render in the docs:\n",
"\n",
"> Important: Pay attention! It's important.\n",
"\n",
"Typing `> Tip: This is my tip.` will render in the docs:\n",
"\n",
"> Tip: This is my tip.\n",
"\n",
"Typing `> Note: Take note of this.` will render in the docs:\n",
"\n",
"> Note: Take note of this.\n",
"\n",
"Typing ``> Note: A doc link to `add_jekyll_notes` should also work fine.`` will render in the docs:\n",
"\n",
"> Note: A doc link to `add_jekyll_notes` should also work fine."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"for w in ['Warning', 'Note', 'Important', 'Tip', 'Bla']:\n",
" cell = {'cell_type': 'markdown', 'source': f\"> {w}: This is my final {w.lower()}!\"}\n",
" res = '{% include '+w.lower()+'.html content=\\'This is my final '+w.lower()+'!\\' %}'\n",
" if w != 'Bla': test_eq(add_jekyll_notes(cell), {'cell_type': 'markdown', 'source': res})\n",
" else: test_eq(add_jekyll_notes(cell), cell)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cell = {'cell_type': 'markdown', 'source': f\"> This is a link, don't break me! https://my.link.com\"}\n",
"test_eq(add_jekyll_notes(cell.copy()), cell)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_image = re.compile(r\"\"\"\n",
"# Catches any image file used, either with `![alt](image_file)` or ``\n",
"^(!\\[ # Beginning of line (since re.MULTILINE is passed) followed by ![ in a catching group\n",
"[^\\]]* # Anything but ]\n",
"\\]\\() # Closing ] and opening (, end of the first catching group\n",
"[ \\t]* # Whitespace before the image path\n",
"([^\\) \\t]*) # Catching block with any character that is not ) or whitespace\n",
"(\\)| |\\t) # Catching group with closing ) or whitespace\n",
"| # OR\n",
"^(]*>) # Catching group with \n",
"\"\"\", re.MULTILINE | re.VERBOSE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"m=_re_image.search('![Alt](images/logo.png)')\n",
"test_eq(m.groups(), ('![Alt](', 'images/logo.png', ')', None))\n",
"# using ) or whitespace to close the group means we don't need a special case for captions\n",
"m=_re_image.search('![Alt](images/logo.png \"caption (something)\")')\n",
"test_eq(m.groups(), ('![Alt](', 'images/logo.png', '', None))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _img2jkl(d, h, jekyll=True):\n",
" if not jekyll: return ''\n",
" if 'width' in d: d['max-width'] = d.pop('width')\n",
" if 'src' in d: d['file'] = d.pop('src')\n",
" return '{% include image.html ' + h.attrs2str() + ' %}'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _is_real_image(src):\n",
" return not (src.startswith('http://') or src.startswith('https://') or src.startswith('data:image/'))"
]
},
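{
"cell_type": "markdown",
"metadata": {},
"source": [
"A few quick checks (added for illustration): only local paths count as images to copy, while web URLs and inline data URIs are left where they are."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"assert _is_real_image('images/logo.png')\n",
"assert not _is_real_image('https://my.site.com/logo.png')\n",
"assert not _is_real_image('data:image/png;base64,iVBORw0KGgo=')"
]
},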
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def copy_images(cell, fname, dest, jekyll=True):\n",
" \"Copy images referenced in `cell` from `fname` parent folder to `dest` folder\"\n",
" def _rep_src(m):\n",
" grps = m.groups()\n",
" if grps[3] is not None:\n",
" h = HTMLParseAttrs()\n",
" dic = h(grps[3])\n",
" src = dic['src']\n",
" else: src = grps[1]\n",
" if _is_real_image(src):\n",
" os.makedirs((Path(dest)/src).parent, exist_ok=True)\n",
" shutil.copy(Path(fname).parent/src, Path(dest)/src)\n",
" src = Config().doc_baseurl + src\n",
" if grps[3] is not None:\n",
" dic['src'] = src\n",
" return _img2jkl(dic, h, jekyll=jekyll)\n",
" else: return f\"{grps[0]}{src}{grps[2]}\"\n",
" if cell['cell_type'] == 'markdown': cell['source'] = _re_image.sub(_rep_src, cell['source'])\n",
" return cell"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is to ensure that all images defined in `nbs_folder/images` and used in notebooks are copied over to `doc_folder/images`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"dest_img = Config().doc_path/'images'/'logo.png'\n",
"cell = {'cell_type': 'markdown', 'source':'Text\\n![Alt](images/logo.png)'}\n",
"try:\n",
" copy_images(cell, Path('01_export.ipynb'), Config().doc_path)\n",
" test_eq(cell[\"source\"], 'Text\\n![Alt](/images/logo.png)')\n",
" #Image has been copied\n",
" assert dest_img.exists()\n",
" cell = {'cell_type': 'markdown', 'source':'Text\\n![Alt](images/logo.png \"caption (something)\")'}\n",
" copy_images(cell, Path('01_export.ipynb'), Config().doc_path)\n",
" test_eq(cell[\"source\"], 'Text\\n![Alt](/images/logo.png \"caption (something)\")')\n",
"finally: dest_img.unlink()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cell = {'cell_type': 'markdown', 'source':'Text\\n![Alt](https://site.logo.png)'}\n",
"copy_images(cell, Path('01_export.ipynb'), Config().doc_path)\n",
"test_eq(cell[\"source\"], 'Text\\n![Alt](https://site.logo.png)')\n",
"cell = {'cell_type': 'markdown', 'source':'Text\\n![Alt](https://site.logo.png \"caption\")'}\n",
"copy_images(cell, Path('01_export.ipynb'), Config().doc_path)\n",
"test_eq(cell[\"source\"], 'Text\\n![Alt](https://site.logo.png \"caption\")')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cell = {'cell_type': 'markdown', 'source': 'Text\\n'}\n",
"try:\n",
" copy_images(cell, Path('01_export.ipynb'), Config().doc_path)\n",
" test_eq(cell[\"source\"], 'Text\\n{% include image.html alt=\"alt\" caption=\"cap\" max-width=\"600\" file=\"/images/logo.png\" %}')\n",
" assert dest_img.exists()\n",
"finally: dest_img.unlink()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cell = {'cell_type': 'markdown', 'source': 'Text\\n'}\n",
"copy_images(cell, Path('01_export.ipynb'), Config().doc_path)\n",
"test_eq(cell[\"source\"], 'Text\\n{% include image.html alt=\"alt\" caption=\"cap\" max-width=\"600\" file=\"http://site.logo.png\" %}')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _relative_to(path1, path2):\n",
" p1,p2 = Path(path1).absolute().parts,Path(path2).absolute().parts\n",
" i=0\n",
" while i '}\n",
"cell1 = adapt_img_path(cell, Path('01_export.ipynb'), Path('.').absolute().parent)\n",
"test_eq(cell1['source'], 'Text\\n{% include image.html alt=\"Logo\" max-width=\"600\" file=\"nbs/images/logo.png\" %}')\n",
"\n",
"cell = {'cell_type': 'markdown', \n",
" 'source': 'Text\\n'}\n",
"cell1 = adapt_img_path(cell, Path('01_export.ipynb'), Path('.').absolute().parent)\n",
"test_eq(cell1['source'], 'Text\\n{% include image.html alt=\"Logo\" max-width=\"600\" file=\"https://site.image.png\" %}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Escape Latex in liquid"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_latex = re.compile(r'^(\\$\\$.*\\$\\$)$', re.MULTILINE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def escape_latex(cell):\n",
" if cell['cell_type'] != 'markdown': return cell\n",
" cell['source'] = _re_latex.sub(r'{% raw %}\\n\\1\\n{% endraw %}', cell['source'])\n",
" return cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cell = {'cell_type': 'markdown', \n",
" 'source': 'lala\\n$$equation$$\\nlala'}\n",
"cell = escape_latex(cell)\n",
"test_eq(cell['source'], 'lala\\n{% raw %}\\n$$equation$$\\n{% endraw %}\\nlala')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Collapsable Code Cells\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_cell_to_collapse_closed = _mk_flag_re('(collapse|collapse_hide|collapse-hide)', 0, \"Cell with #collapse or #collapse_hide\")\n",
"_re_cell_to_collapse_open = _mk_flag_re('(collapse_show|collapse-show)', 0, \"Cell with #collapse_show\")\n",
"_re_cell_to_collapse_output = _mk_flag_re('(collapse_output|collapse-output)', 0, \"Cell with #collapse_output\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def collapse_cells(cell):\n",
" \"Add a collapse button to inputs or outputs of `cell` in either the open or closed position\"\n",
" if check_re(cell, _re_cell_to_collapse_closed): upd_metadata(cell,'collapse_hide')\n",
" elif check_re(cell, _re_cell_to_collapse_open): upd_metadata(cell,'collapse_show')\n",
" elif check_re(cell, _re_cell_to_collapse_output): upd_metadata(cell,'collapse_output')\n",
" return cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"for flag in [\n",
" ('collapse_hide', '#collapse'),\n",
" ('collapse_hide', '# collapse_hide'),\n",
" ('collapse_hide', ' # collapse-hide'),\n",
" ('collapse_show', '#collapse_show'),\n",
" ('collapse_show', '#collapse-show'),\n",
" ('collapse_output', ' #collapse_output'),\n",
" ('collapse_output', '#collapse-output')]:\n",
" cell = nbformat.v4.new_code_cell(f'#comment\\n{flag[1]} \\ndef some_code')\n",
" test_eq(True, collapse_cells(cell)['metadata'][flag[0]])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"# check that we can't collapse both input and output\n",
"cell = nbformat.v4.new_code_cell(f'#hide-input\\n#collapse_output \\ndef some_code')\n",
"test_eq({'hide_input': True, 'collapse_output': True}, hide_cells(collapse_cells(cell))['metadata'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- Placing `#collapse_input open` in a code cell will inlcude your code under a collapsable element that is **open** by default."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"This code cell is not collapsed by default but you can collapse it to hide it from view!\n",
"Note that the output always shows with `%collapse_input`.\n"
]
}
],
"source": [
"#collapse_input open\n",
"print('This code cell is not collapsed by default but you can collapse it to hide it from view!')\n",
"print(\"Note that the output always shows with `%collapse_input`.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- Placing `#collapse_input` in a code cell will include your code in a collapsable element that is **closed** by default. For example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The code cell that produced this output is collapsed by default but you can expand it!\n"
]
}
],
"source": [
"#collapse_input\n",
"print('The code cell that produced this output is collapsed by default but you can expand it!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"- Placing `#collapse_output` in a code cell will hide the output under a collapsable element that is **closed** by default."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The input of this cell is visible as usual.\n",
"However, the OUTPUT of this cell is collapsed by default but you can expand it!\n"
]
}
],
"source": [
"#collapse_output\n",
"print('The input of this cell is visible as usual.\\nHowever, the OUTPUT of this cell is collapsed by default but you can expand it!')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Preprocessing the list of cells"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following functions are applied to the entire list of cells of the notebook as a preprocessing step before the conversion to html."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_hide = _mk_flag_re('hide', 0, 'Cell with #hide')\n",
"_re_cell_to_remove = _mk_flag_re('(default_exp|exporti)', (0,1), 'Cell with #default_exp or #exporti')\n",
"_re_default_cls_lvl = _mk_flag_re('default_cls_lvl', 1, \"Cell with #default_cls_lvl\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def remove_hidden(cells):\n",
" \"Remove in `cells` the ones with a flag `#hide`, `#default_exp`, `#default_cls_lvl` or `#exporti`\"\n",
" _hidden = lambda c: check_re(c, _re_hide, code_only=False) or check_re(c, _re_cell_to_remove)\n",
" return L(cells).filter(_hidden, negate=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cells = [{'cell_type': 'code', 'source': source, 'hide': hide} for hide, source in [\n",
" (False, '# export\\nfrom local.core import *'), \n",
" (False, '# exporti mod file'), # Note: this used to get removed but we're more strict now\n",
" (True, '# hide\\nfrom local.core import *'),\n",
" (False, '# hide_input\\nfrom local.core import *'),\n",
" (False, '#exports\\nsuper code'),\n",
" (True, '#default_exp notebook.export'),\n",
" (False, 'show_doc(read_nb)'),\n",
" (False, '#hide (last test of to_concat)'),\n",
" (True, '# exporti\\n1 + 1')]] + [\n",
" {'cell_type': 'markdown', 'source': source, 'hide': hide} for hide, source in [\n",
" (False, '#hide_input\\nnice'), \n",
" (True, '#hide\\n\\nto hide')]]\n",
"\n",
"for a,b in zip([cell for cell in cells if not cell['hide']], remove_hidden(cells)):\n",
" test_eq(a,b)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def find_default_level(cells):\n",
" \"Find in `cells` the default class level.\"\n",
" res = L(cells).map_first(check_re_multi, pats=_re_default_cls_lvl)\n",
" return int(res.groups()[0]) if res else 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tst_nb = read_nb('00_export.ipynb')\n",
"test_eq(find_default_level(tst_nb['cells']), 3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_export = _mk_flag_re(\"exports?\", (0,1), \"Line with #export or #exports with or without module name\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def nb_code_cell(source):\n",
" \"A code cell (as a dict) containing `source`\"\n",
" return {'cell_type': 'code', 'execution_count': None, 'metadata': {}, 'outputs': [], 'source': source}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _show_doc_cell(name, cls_lvl=None):\n",
" return nb_code_cell(f\"show_doc({name}{'' if cls_lvl is None else f', default_cls_level={cls_lvl}'})\")\n",
"\n",
"def add_show_docs(cells, cls_lvl=None):\n",
" \"Add `show_doc` for each exported function or class\"\n",
" documented = []\n",
" for cell in cells:\n",
" m = check_re(cell, _re_show_doc)\n",
" if not m: continue\n",
" documented.append(m.group(1))\n",
"\n",
" def _documented(name): return name in documented\n",
"\n",
" for cell in cells:\n",
" res.append(cell)\n",
" if check_re(cell, _re_export):\n",
" for n in export_names(cell['source'], func_only=True):\n",
" if not _documented(n): res.insert(len(res)-1, _show_doc_cell(n, cls_lvl=cls_lvl))\n",
" return res"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _show_doc_cell(name, cls_lvl=None):\n",
" return nb_code_cell(f\"show_doc({name}{'' if cls_lvl is None else f', default_cls_level={cls_lvl}'})\")\n",
"\n",
"def add_show_docs(cells, cls_lvl=None):\n",
" \"Add `show_doc` for each exported function or class\"\n",
" documented = L(cells).map_filter(check_re, pat=_re_show_doc).map(Self.group(1))\n",
" res = []\n",
" for cell in cells:\n",
" res.append(cell)\n",
" if check_re(cell, _re_export):\n",
" for n in export_names(cell['source'], func_only=True):\n",
" if not n in documented: res.insert(len(res)-1, _show_doc_cell(n, cls_lvl=cls_lvl))\n",
" return res"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This only adds cells with a `show_doc` for non-documented functions, so if you add yourself a `show_doc` cell (because you want to change one of the default argument), there won't be any duplicates."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i,cell in enumerate(tst_nb['cells']):\n",
" if cell['source'].startswith('#export\\ndef read_nb'): break\n",
"tst_cells = [c.copy() for c in tst_nb['cells'][i-1:i+1]]\n",
"added_cells = add_show_docs(tst_cells, cls_lvl=3)\n",
"test_eq(len(added_cells), 3)\n",
"test_eq(added_cells[0], tst_nb['cells'][i-1])\n",
"test_eq(added_cells[2], tst_nb['cells'][i])\n",
"test_eq(added_cells[1], _show_doc_cell('read_nb', cls_lvl=3))\n",
"test_eq(added_cells[1]['source'], 'show_doc(read_nb, default_cls_level=3)')\n",
"\n",
"for flag in ['#export', '#exports']:\n",
" for show_doc_source in [\n",
" ('show_doc(my_func)', 'show_doc(my_func, title_level=3)')]:\n",
" #Check show_doc isn't added if it was already there.\n",
" tst_cells1 = [{'cell_type':'code', 'source': f'{flag}\\ndef my_func(x):\\n return x'},\n",
" {'cell_type':'code', 'source': show_doc_source[0]}]\n",
" test_eq(add_show_docs(tst_cells1), tst_cells1)\n",
" #Check show_doc is added\n",
" test_eq(len(add_show_docs(tst_cells1[:-1])), len(tst_cells1))\n",
" tst_cells1 = [{'cell_type':'code', 'source': f'{flag} with.mod\\ndef my_func(x):\\n return x'},\n",
" {'cell_type':'markdown', 'source': 'Some text'},\n",
" {'cell_type':'code', 'source': show_doc_source[1]}]\n",
" test_eq(add_show_docs(tst_cells1), tst_cells1)\n",
" #Check show_doc is added when using mod export\n",
" test_eq(len(add_show_docs(tst_cells1[:-1])), len(tst_cells1))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_fake_header = re.compile(r\"\"\"\n",
"# Matches any fake header (one that ends with -)\n",
"\\#+ # One or more #\n",
"\\s+ # One or more of whitespace\n",
".* # Any char\n",
"-\\s* # A dash followed by any number of white space\n",
"$ # End of text\n",
"\"\"\", re.VERBOSE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def remove_fake_headers(cells):\n",
" \"Remove in `cells` the fake header\"\n",
" return [c for c in cells if c['cell_type']=='code' or _re_fake_header.search(c['source']) is None]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can fake headers in your notebook to navigate them more easily with collapsible headers, just make them finish with a dash and they will be removed. One typical use case is to have a header of level 2 with the name of a class, since the `show_doc` cell of that class will create the same anchor, you need to have the one you created manually disappear to avoid any duplicate."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cells = [{'cell_type': 'markdown',\n",
" 'metadata': {},\n",
" 'source': '### Fake-'}] + tst_nb['cells'][:10]\n",
"cells1 = remove_fake_headers(cells)\n",
"test_eq(len(cells1), len(cells)-1)\n",
"test_eq(cells1[0], cells[1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def remove_empty(cells):\n",
" \"Remove in `cells` the empty cells\"\n",
" return [c for c in cells if len(c['source']) >0]"
]
},
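{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick check (added for illustration): cells with an empty source are dropped, everything else is kept."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cells = [{'cell_type': 'code', 'source': ''}, {'cell_type': 'markdown', 'source': 'keep me'}]\n",
"test_eq(remove_empty(cells), [{'cell_type': 'markdown', 'source': 'keep me'}])"
]
},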
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Grabbing metada"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_title_summary = re.compile(r\"\"\"\n",
"# Catches the title and summary of the notebook, presented as # Title > summary, with title in group 1 and summary in group 2\n",
"^\\s* # Beginning of text followe by any number of whitespace\n",
"\\#\\s+ # # followed by one or more of whitespace\n",
"([^\\n]*) # Catching group for any character except a new line\n",
"\\n+ # One or more new lines\n",
">[ ]* # > followed by any number of whitespace\n",
"([^\\n]*) # Catching group for any character except a new line\n",
"\"\"\", re.VERBOSE)\n",
"\n",
"_re_title_only = re.compile(r\"\"\"\n",
"# Catches the title presented as # Title without a summary\n",
"^\\s* # Beginning of text followe by any number of whitespace\n",
"\\#\\s+ # # followed by one or more of whitespace\n",
"([^\\n]*) # Catching group for any character except a new line\n",
"(?:\\n|$) # New line or end of text\n",
"\"\"\", re.VERBOSE)\n",
"\n",
"_re_properties = re.compile(r\"\"\"\n",
"^-\\s+ # Beginnig of a line followed by - and at least one space\n",
"(.*?) # Any pattern (shortest possible)\n",
"\\s*:\\s* # Any number of whitespace, :, any number of whitespace\n",
"(.*?)$ # Any pattern (shortest possible) then end of line\n",
"\"\"\", re.MULTILINE | re.VERBOSE)\n",
"\n",
"_re_mdlinks = re.compile(r\"\\[(.+)]\\((.+)\\)\", re.MULTILINE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _md2html_links(s):\n",
" 'Converts markdown links to html links'\n",
" return _re_mdlinks.sub(r\"\\1\", s)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def get_metadata(cells):\n",
" \"Find the cell with title and summary in `cells`.\"\n",
" for i,cell in enumerate(cells):\n",
" if cell['cell_type'] == 'markdown':\n",
" match = _re_title_summary.match(cell['source'])\n",
" if match:\n",
" cells.pop(i)\n",
" attrs = {k:v for k,v in _re_properties.findall(cell['source'])}\n",
" return {'keywords': 'fastai',\n",
" 'summary' : _md2html_links(match.groups()[1]),\n",
" 'title' : match.groups()[0],\n",
" **attrs}\n",
" elif _re_title_only.search(cell['source']) is not None:\n",
" title = _re_title_only.search(cell['source']).groups()[0]\n",
" cells.pop(i)\n",
" attrs = {k:v for k,v in _re_properties.findall(cell['source'])}\n",
" return {'keywords': 'fastai',\n",
" 'title' : title,\n",
" **attrs}\n",
"\n",
" return {'keywords': 'fastai',\n",
" 'title' : 'Title'}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the markdown cell with the title, you can add the summary as a block quote (just put an empty block quote for an empty summary) and a list with any additional metadata you would like to add, for instance:\n",
"```\n",
"# Title\n",
"\n",
"> Awesome summary\n",
"- toc: False\n",
"```\n",
"\n",
"The toc: False metadata will prevent the table of contents from showing on the page."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tst_nb = read_nb('00_export.ipynb')\n",
"test_eq(get_metadata(tst_nb['cells']), {\n",
" 'keywords': 'fastai',\n",
" 'summary': 'The functions that transform notebooks in a library',\n",
" 'title': 'Export to modules'})\n",
"\n",
"#The cell with the metada is popped out, so if we do it a second time we get the default.\n",
"test_eq(get_metadata(tst_nb['cells']), {'keywords': 'fastai', 'title' : 'Title'})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"#test with title only\n",
"test_eq(get_metadata([{'cell_type': 'markdown', 'source': '# Awesome title'}]), \n",
" {'keywords': 'fastai', 'title': 'Awesome title'})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"text = r\"\"\"\n",
"[This](https://nbdev.fast.ai) goes to docs.\n",
"This [one:here](00_export.ipynb) goes to a local nb. \n",
"\\n[And-this](http://dev.fast.ai/) goes to fastai docs\n",
"\"\"\"\n",
"res = \"\"\"\n",
"This goes to docs.\\nThis one:here goes to a local nb. \\n\\\\nAnd-this goes to fastai docs\n",
"\"\"\"\n",
"test_eq(_md2html_links(text), res)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cells = [{'cell_type': 'markdown', 'source': \"# Title\\n\\n> s\\n\\n- toc: false\"}]\n",
"test_eq(get_metadata(cells), {'keywords': 'fastai', 'summary': 's', 'title': 'Title', 'toc': 'false'})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Executing show_doc cells"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_mod_export = _mk_flag_re(\"export[s]?\", 1,\n",
" \"Matches any line with #export or #exports with a module name and catches it in group 1\")\n",
"\n",
"def _gather_export_mods(cells):\n",
" res = []\n",
" for cell in cells:\n",
" tst = check_re(cell, _re_mod_export)\n",
" if tst is not None: res.append(tst.groups()[0])\n",
" return res"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cells = [\n",
" {'cell_type': 'markdown', 'source': '#export ignored'},\n",
" {'cell_type': 'code', 'source': '#export'},\n",
" {'cell_type': 'code', 'source': '#export normal'},\n",
" {'cell_type': 'code', 'source': '# exports show'},\n",
" {'cell_type': 'code', 'source': '# exporti hidden'},\n",
" {'cell_type': 'code', 'source': '#export\\n@call_parse'},\n",
" {'cell_type': 'code', 'source': '#export \\n@delegates(concurrent.futures.ProcessPoolExecutor)'}\n",
"]\n",
"\n",
"test_eq(_gather_export_mods(cells), ['normal', 'show'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"# match any cell containing a zero indented import from the current lib\n",
"_re_lib_import = ReLibName(r\"^from LIB_NAME\\.\", re.MULTILINE)\n",
"# match any cell containing a zero indented import\n",
"_re_import = re.compile(r\"^from[ \\t]+\\S+[ \\t]+import|^import[ \\t]\", re.MULTILINE)\n",
"# match any cell containing a zero indented call to notebook2script\n",
"_re_notebook2script = re.compile(r\"^notebook2script\\(\", re.MULTILINE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"for cell in [nbformat.v4.new_code_cell(s, metadata={'exp': exp}) for exp,s in [\n",
" (True, 'show_doc(Tensor.p)'),\n",
" (True, ' show_doc(Tensor.p)'),\n",
" (True, 'if somthing:\\n show_doc(Tensor.p)'),\n",
" (False, '# show_doc(Tensor.p)'),\n",
" (True, '# comment \\n show_doc(Tensor.p)'),\n",
" (True, '\"\"\"\\nshow_doc(Tensor.p)\\n\"\"\"'),\n",
" (True, 'import torch\\nshow_doc(Tensor.p)'),\n",
" (False,'class Ex(ExP):\\n\"An `ExP` that ...\"\\ndef preprocess_cell(self, cell, resources, index):\\n'),\n",
" (False, 'from somewhere import something'),\n",
" (False, 'from '),\n",
" (False, 'import re'),\n",
" (False, 'import '),\n",
" (False, 'try: from PIL import Image\\except: pass'),\n",
" (False, 'from PIL import Image\\n@patch\\ndef p(x:Image):\\n pass'),\n",
" (False, '@patch\\ndef p(x:Image):\\n pass\\nfrom PIL import Image')]]:\n",
" exp = cell.metadata.exp\n",
" assert exp == bool(check_re_multi(cell, [_re_show_doc, _re_lib_import.re])), f'expected {exp} for {cell}'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"for cell in [nbformat.v4.new_code_cell(s, metadata={'exp': exp}) for exp,s in [\n",
" (False, 'show_doc(Tensor.p)'),\n",
" (True, 'import torch\\nshow_doc(Tensor.p)'),\n",
" (False,'class Ex(ExP):\\n\"An `ExP` that ...\"\\ndef preprocess_cell(self, cell, resources, index):\\n'),\n",
" (False, ' from somewhere import something'),\n",
" (True, 'from somewhere import something'),\n",
" (False, 'from '),\n",
" (False, 'select * \\nfrom database'),\n",
" (False, ' import re'),\n",
" (True, 'import re'),\n",
" (True, 'import '),\n",
" (False, 'try: from PIL import Image\\except: pass'),\n",
" (True, 'from PIL import Image\\n@patch\\ndef p(x:Image):\\n pass'),\n",
" (True, '@patch\\ndef p(x:Image):\\n pass\\nfrom PIL import Image')]]:\n",
" exp = cell.metadata.exp\n",
" assert exp == bool(check_re(cell, _re_import)), f'expected {exp} for {cell}'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"for cell in [nbformat.v4.new_code_cell(s, metadata={'exp': exp}) for exp,s in [\n",
" (False, 'show_doc(Tensor.p)'),\n",
" (False, 'notebook2script'),\n",
" (False, '#notebook2script()'),\n",
" (True, 'notebook2script()'),\n",
" (True, 'notebook2script(anything at all)')]]:\n",
" exp = cell.metadata.exp\n",
" assert exp == bool(check_re(cell, _re_notebook2script)), f'expected {exp} for {cell}'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _non_comment_code(s):\n",
" if re.match(r'\\s*#', s): return False\n",
" if _re_import.findall(s) or _re_lib_import.re.findall(s): return False\n",
" return re.match(r'\\s*\\w', s)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"class ExecuteShowDocPreprocessor(ExecutePreprocessor):\n",
" \"An `ExecutePreprocessor` that only executes `show_doc` and `import` cells\"\n",
" def preprocess_cell(self, cell, resources, index):\n",
" if not check_re(cell, _re_notebook2script):\n",
" if check_re(cell, _re_show_doc):\n",
" return super().preprocess_cell(cell, resources, index)\n",
" elif check_re_multi(cell, [_re_import, _re_lib_import.re]):\n",
" if check_re_multi(cell, [_re_export, 'show_doc', '^\\s*#\\s*import']):\n",
"# r = list(filter(_non_comment_code, cell['source'].split('\\n')))\n",
"# if r: print(\"You have import statements mixed with other code\", r)\n",
" return super().preprocess_cell(cell, resources, index)\n",
"# try: return super().preprocess_cell(cell, resources, index)\n",
"# except: pass\n",
" return cell, resources"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Cells containing:\n",
"- a zero indented call to `notebook2script`\n",
"\n",
"are not run while building docs. This avoids failures caused by importing empty or partially built modules.\n",
"\n",
"Cells containing:\n",
"- `show_doc` (which could be indented) or\n",
"- a \"library import\" (zero indent import from current library) e.g. `from LIB_NAME.core import *`\n",
"\n",
"are executed and must run without error. If running these cells raises an exception, the build will stop.\n",
"\n",
"Cells containing zero indented imports. e.g.\n",
"- `from module import *` or\n",
"- `import module`\n",
"\n",
"are executed but errors will not stop the build.\n",
"\n",
"If you need to `show_doc` something, please make sure it's imported via a cell that does not depend on previous cells being run. The easiest way to do this is to use a cell that contains nothing but imports."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _import_show_doc_cell(mods=None):\n",
" \"Add an import show_doc cell.\"\n",
" source = f\"from nbdev.showdoc import show_doc\"\n",
" if mods is not None:\n",
" for mod in mods: source += f\"\\nfrom {Config().lib_name}.{mod} import *\"\n",
" return {'cell_type': 'code',\n",
" 'execution_count': None,\n",
" 'metadata': {'hide_input': True},\n",
" 'outputs': [],\n",
" 'source': source}\n",
"\n",
"def execute_nb(nb, mod=None, metadata=None, show_doc_only=True):\n",
" \"Execute `nb` (or only the `show_doc` cells) with `metadata`\"\n",
" mods = ([] if mod is None else [mod]) + _gather_export_mods(nb['cells'])\n",
" nb['cells'].insert(0, _import_show_doc_cell(mods))\n",
" ep_cls = ExecuteShowDocPreprocessor if show_doc_only else ExecutePreprocessor\n",
" ep = ep_cls(timeout=600, kernel_name='python3')\n",
" metadata = metadata or {}\n",
" pnb = nbformat.from_dict(nb)\n",
" ep.preprocess(pnb, metadata)\n",
" return pnb"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Converting bibtex citations"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_cite = re.compile(r\"(\\\\cite{)([^}]*)(})\", re.MULTILINE | re.VERBOSE) # Catches citations used with `\\cite{}`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _textcite2link(text):\n",
" citations = _re_cite.finditer(text)\n",
" out = []\n",
" start_pos = 0\n",
" for cit_group in citations:\n",
" cit_pos_st = cit_group.span()[0]\n",
" cit_pos_fin = cit_group.span()[1]\n",
" out.append(text[start_pos:cit_pos_st])\n",
" out.append('[')\n",
" cit_group = cit_group[2].split(',')\n",
" for i, cit in enumerate(cit_group):\n",
" cit=cit.strip()\n",
" out.append(f\"\"\"{cit}\"\"\")\n",
" if i != len(cit_group) - 1:\n",
" out.append(',')\n",
" out.append(']')\n",
" start_pos = cit_pos_fin\n",
" out.append(text[start_pos:])\n",
" return ''.join(out)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def cite2link(cell):\n",
" '''Creates links from \\cite{} to Refenrence section generated by jupyter_latex_envs'''\n",
" if cell['cell_type'] == 'markdown': cell['source'] = _textcite2link(cell['source'])\n",
" return cell"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"jupyter_latex_envs is a jupyter extension https://github.com/jfbercher/jupyter_latex_envs.\n",
"\n",
"You can find relevant section [here](https://rawgit.com/jfbercher/jupyter_latex_envs/master/src/latex_envs/static/doc/latex_env_doc.html#Bibliography)\n",
"Note, that nbdev now only supports `\\cite{}` conversion and not the rest, e.g., `\\figure{}` and so on."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"cell = {'cell_type': 'markdown', 'source': r\"\"\"This is cited multireference \\cite{Frob1, Frob3}.\n",
"And single \\cite{Frob2}.\"\"\"}\n",
"expected=r\"\"\"This is cited multireference [Frob1,Frob3].\n",
"And single [Frob2].\"\"\"\n",
"test_eq(cite2link(cell)[\"source\"], expected)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"It's important to execute all `show_doc` cells before exporting the notebook to html because some of them have just been added automatically or others could have outdated links."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#slow\n",
"fake_nb = {k:v for k,v in tst_nb.items() if k != 'cells'}\n",
"fake_nb['cells'] = [tst_nb['cells'][0].copy()] + added_cells\n",
"fake_nb = execute_nb(fake_nb, mod='export')\n",
"assert len(fake_nb['cells'][-2]['outputs']) > 0"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Filling templates"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following functions automatically adds jekyll templates if they are misssing."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def write_tmpl(tmpl, nms, cfg, dest):\n",
" \"Write `tmpl` to `dest` (if missing) filling in `nms` in template using dict `cfg`\"\n",
" if dest.exists(): return\n",
" vs = {o:cfg.d[o] for o in nms.split()}\n",
" outp = tmpl.format(**vs)\n",
" dest.write_text(outp)"
]
},
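{
"cell_type": "markdown",
"metadata": {},
"source": [
"For illustration, here is a minimal sketch of how `write_tmpl` fills a template; the template string, the `fake_cfg` stand-in (any object exposing a `.d` dict, like `Config`) and the destination path are all made up for this example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from types import SimpleNamespace\n",
"tmpl = \"title: {title}\\nauthor: {user}\\n\"\n",
"fake_cfg = SimpleNamespace(d={'title': 'My Library', 'user': 'someone'})\n",
"dest = Path('/tmp/_tmpl_test.yml')\n",
"if dest.exists(): dest.unlink()  # write_tmpl is a no-op when dest already exists\n",
"write_tmpl(tmpl, 'title user', fake_cfg, dest)\n",
"test_eq(dest.read_text(), 'title: My Library\\nauthor: someone\\n')\n",
"dest.unlink()"
]
},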
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def write_tmpls():\n",
" \"Write out _config.yml and _data/topnav.yml using templates\"\n",
" cfg = Config()\n",
" path = Path(cfg.get('doc_src_path', cfg.doc_path))\n",
" write_tmpl(config_tmpl, 'user lib_name title copyright description', cfg, path/'_config.yml')\n",
" write_tmpl(topnav_tmpl, 'host git_url', cfg, path/'_data'/'topnav.yml')\n",
" write_tmpl(makefile_tmpl, 'nbs_path lib_name', cfg, cfg.config_file.parent/'Makefile')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Conversion"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"__file__ = Config().lib_path/'export2html.py'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def nbdev_exporter(cls=HTMLExporter, template_file=None):\n",
" cfg = traitlets.config.Config()\n",
" exporter = cls(cfg)\n",
" exporter.exclude_input_prompt=True\n",
" exporter.exclude_output_prompt=True\n",
" exporter.anchor_link_text = ' '\n",
" exporter.template_file = 'jekyll.tpl' if template_file is None else template_file\n",
" exporter.template_path.append(str(Path(__file__).parent/'templates'))\n",
" return exporter"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"process_cells = [remove_fake_headers, remove_hidden, remove_empty]\n",
"process_cell = [hide_cells, collapse_cells, remove_widget_state, add_jekyll_notes, escape_latex, cite2link]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _nb2htmlfname(nb_path, dest=None):\n",
" if dest is None: dest = Config().doc_path\n",
" return Path(dest)/re_digits_first.sub('', nb_path.with_suffix('.html').name)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"test_eq(_nb2htmlfname(Path('00a_export.ipynb')), Config().doc_path/'export.html')\n",
"test_eq(_nb2htmlfname(Path('export.ipynb')), Config().doc_path/'export.html')\n",
"test_eq(_nb2htmlfname(Path('00ab_export_module_1.ipynb')), Config().doc_path/'export_module_1.html')\n",
"test_eq(_nb2htmlfname(Path('export.ipynb'), '.'), Path('export.html'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def convert_nb(fname, cls=HTMLExporter, template_file=None, exporter=None, dest=None, execute=True):\n",
" \"Convert a notebook `fname` to html file in `dest_path`.\"\n",
" fname = Path(fname).absolute()\n",
" nb = read_nb(fname)\n",
" meta_jekyll = get_metadata(nb['cells'])\n",
" meta_jekyll['nb_path'] = str(fname.relative_to(Config().lib_path.parent))\n",
" cls_lvl = find_default_level(nb['cells'])\n",
" mod = find_default_export(nb['cells'])\n",
" nb['cells'] = compose(*process_cells,partial(add_show_docs, cls_lvl=cls_lvl))(nb['cells'])\n",
" _func = compose(partial(copy_images, fname=fname, dest=Config().doc_path), *process_cell, treat_backticks)\n",
" nb['cells'] = [_func(c) for c in nb['cells']]\n",
" if execute: nb = execute_nb(nb, mod=mod)\n",
" nb['cells'] = [clean_exports(c) for c in nb['cells']]\n",
" if exporter is None: exporter = nbdev_exporter(cls=cls, template_file=template_file)\n",
" with open(_nb2htmlfname(fname, dest=dest),'w') as f:\n",
" f.write(exporter.from_notebook_node(nb, resources=meta_jekyll)[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _notebook2html(fname, cls=HTMLExporter, template_file=None, exporter=None, dest=None, execute=True):\n",
" time.sleep(random.random())\n",
" print(f\"converting: {fname}\")\n",
" try:\n",
" convert_nb(fname, cls=cls, template_file=template_file, exporter=exporter, dest=dest, execute=execute)\n",
" return True\n",
" except Exception as e:\n",
" print(e)\n",
" return False"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def notebook2html(fname=None, force_all=False, n_workers=None, cls=HTMLExporter, template_file=None,\n",
" exporter=None, dest=None, pause=0, execute=True):\n",
" \"Convert all notebooks matching `fname` to html files\"\n",
" if fname is None:\n",
" files = [f for f in Config().nbs_path.glob('**/*.ipynb')\n",
" if not f.name.startswith('_') and not '/.' in f.as_posix()]\n",
" else:\n",
" p = Path(fname)\n",
" files = list(p.parent.glob(p.name))\n",
" if len(files)==1:\n",
" force_all = True\n",
" if n_workers is None: n_workers=0\n",
" if not force_all:\n",
" # only rebuild modified files\n",
" files,_files = [],files.copy()\n",
" for fname in _files:\n",
" fname_out = _nb2htmlfname(Path(fname).absolute(), dest=dest)\n",
" if not fname_out.exists() or os.path.getmtime(fname) >= os.path.getmtime(fname_out):\n",
" files.append(fname)\n",
" if len(files)==0: print(\"No notebooks were modified\")\n",
" else:\n",
" passed = parallel(_notebook2html, files, n_workers=n_workers, cls=cls,\n",
" template_file=template_file, exporter=exporter, dest=dest, pause=pause, execute=execute)\n",
" if not all(passed):\n",
" msg = \"Conversion failed on the following:\\n\"\n",
" print(msg + '\\n'.join([f.name for p,f in zip(passed,files) if not p]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": false
},
"outputs": [
{
"data": {
"text/html": [],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"converting: 01_sync.ipynb\n"
]
}
],
"source": [
"#hide\n",
"# Test when an argument is given to notebook2html\n",
"p1 = Path('/tmp/sync.html')\n",
"if p1.exists(): p1.unlink()\n",
"notebook2html('01_sync.ipynb', dest='/tmp');\n",
"assert p1.exists()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": false
},
"outputs": [
{
"data": {
"text/html": [],
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"converting: /home/jhoward/git/nbdev/nbs/03_export2html.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/06_cli.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/05_merge.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/00_export.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/04_test.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/07_clean.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/01_sync.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/index.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/tutorial.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/99_search.ipynb\n",
"converting: /home/jhoward/git/nbdev/nbs/02_showdoc.ipynb\n"
]
}
],
"source": [
"#slow\n",
"#hide\n",
"# Test when no argument is given to notebook2html\n",
"dest_files = [_nb2htmlfname(f, dest='/tmp') for f in Config().nbs_path.glob('**/*.ipynb')\n",
" if not f.name.startswith('_') and not 'checkpoint' in f.name]\n",
"[f.unlink() for f in dest_files if f.exists()]\n",
"notebook2html(fname=None, dest='/tmp');\n",
"for f in dest_files: assert f.exists(), f"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": false
},
"outputs": [],
"source": [
"#hide\n",
"# # Test Error handling\n",
"# try: notebook2html('../README.md');\n",
"# except Exception as e: pass\n",
"# else: assert False, 'An error should be raised when a non-notebook file is passed to notebook2html!'"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Hide cells starting with `#export` and only leaves the prose and the tests. If `fname` is not specified, this will convert all notebooks not beginning with an underscore in the `nb_folder` defined in `setting.ini`. Otherwise `fname` can be a single filename or a glob expression.\n",
"\n",
"By default, only the notebooks that are more recent than their html counterparts are modified, pass `force_all=True` to change that behavior."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"#notebook2html(force_all=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def convert_md(fname, dest_path, img_path='docs/images/', jekyll=True):\n",
" \"Convert a notebook `fname` to a markdown file in `dest_path`.\"\n",
" fname = Path(fname).absolute()\n",
" if not img_path: img_path = fname.stem + '_files/'\n",
" Path(img_path).mkdir(exist_ok=True, parents=True)\n",
" nb = read_nb(fname)\n",
" meta_jekyll = get_metadata(nb['cells'])\n",
" try: meta_jekyll['nb_path'] = str(fname.relative_to(Config().lib_path.parent))\n",
" except: meta_jekyll['nb_path'] = str(fname)\n",
" nb['cells'] = compose(*process_cells)(nb['cells'])\n",
" nb['cells'] = [compose(partial(adapt_img_path, fname=fname, dest=dest_path, jekyll=jekyll), *process_cell)(c)\n",
" for c in nb['cells']]\n",
" fname = Path(fname).absolute()\n",
" dest_name = fname.with_suffix('.md').name\n",
" exp = nbdev_exporter(cls=MarkdownExporter, template_file='jekyll-md.tpl' if jekyll else 'md.tpl')\n",
" export = exp.from_notebook_node(nb, resources=meta_jekyll)\n",
" md = export[0]\n",
" for ext in ['png', 'svg']:\n",
" md = re.sub(r'!\\['+ext+'\\]\\((.+)\\)', '!['+ext+'](' + img_path + '\\\\1)', md)\n",
" with (Path(dest_path)/dest_name).open('w') as f: f.write(md)\n",
" if hasattr(export[1]['outputs'], 'items'):\n",
" for n,o in export[1]['outputs'].items():\n",
" with open(Path(dest_path)/img_path/n, 'wb') as f: f.write(o)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This is used to convert the index into the `README.md`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"def _test_md(fn):\n",
" fn,dest = Path(fn),Path().absolute().parent\n",
" try: convert_md(fn, dest, jekyll=False)\n",
" finally: (dest/f'{fn.stem}.md').unlink()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"_test_md('index.ipynb')\n",
"# `export[1]['outputs']` will be a `str` if the notebook has no markdown cells to convert.\n",
"# e.g. the nb could have a single jekyll markdown cell or just code cells ...\n",
"_test_md(f'../tests/single-cell-index.ipynb')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_att_ref = re.compile(r' *!\\[(.*)\\]\\(attachment:image.png(?: \"(.*)\")?\\)')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"t = '![screenshot](attachment:image.png)'\n",
"test_eq(_re_att_ref.match(t).groups(), ('screenshot', None))\n",
"\n",
"t = '![screenshot](attachment:image.png \"Deploying to Binder\")'\n",
"test_eq(_re_att_ref.match(t).groups(), ('screenshot', \"Deploying to Binder\"))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"try: from PIL import Image\n",
"except: pass # Only required for _update_att_ref"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_tmpl_img = ''\n",
"\n",
"def _update_att_ref(line, path, img):\n",
" m = _re_att_ref.match(line)\n",
" if not m: return line\n",
" alt,title = m.groups()\n",
" w = img.size[0]\n",
" if alt=='screenshot': w //= 2\n",
" if not title: title = \"TK: add title\"\n",
" return _tmpl_img.format(title=title, width=str(w), id='TK: add it', name=str(path))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _nb_detach_cell(cell, dest, use_img):\n",
" att,src = cell['attachments'],cell['source']\n",
" mime,img = first(first(att.values()).items())\n",
" ext = mime.split('/')[1]\n",
" for i in range(99999):\n",
" p = dest/(f'att_{i:05d}.{ext}')\n",
" if not p.exists(): break\n",
" img = b64decode(img)\n",
" p.write_bytes(img)\n",
" del(cell['attachments'])\n",
" if use_img: return [_update_att_ref(o,p,Image.open(p)) for o in src]\n",
" else: return [o.replace('attachment:image.png', str(p)) for o in src]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def nb_detach_cells(path_nb, dest=None, replace=True, use_img=False):\n",
" \"Export cell attachments to `dest` and update references\"\n",
" path_nb = Path(path_nb)\n",
" if not dest: dest = f'{path_nb.stem}_files'\n",
" dest = Path(dest)\n",
" dest.mkdir(exist_ok=True, parents=True)\n",
" j = json.load(path_nb.open())\n",
" atts = [o for o in j['cells'] if 'attachments' in o]\n",
" for o in atts: o['source'] = _nb_detach_cell(o, dest, use_img)\n",
" if atts and replace: json.dump(j, path_nb.open('w'))\n",
" if not replace: return j"
]
},
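{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hedged usage sketch (the notebook name is a placeholder): attachments are written out as image files next to the notebook and the references in the markdown cells are rewritten to point at them.\n",
"\n",
"```python\n",
"# extract attachments of my_notebook.ipynb to my_notebook_files/ and update the\n",
"# notebook in place; pass replace=False to get the modified notebook json back\n",
"nb_detach_cells('my_notebook.ipynb')\n",
"```"
]
},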
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Sidebar"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"import time,random,warnings"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _leaf(k,v):\n",
" url = 'external_url' if \"http\" in v else 'url'\n",
" #if url=='url': v=v+'.html'\n",
" return {'title':k, url:v, 'output':'web,pdf'}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_k_names = ['folders', 'folderitems', 'subfolders', 'subfolderitems']\n",
"def _side_dict(title, data, level=0):\n",
" k_name = _k_names[level]\n",
" level += 1\n",
" res = [(_side_dict(k, v, level) if isinstance(v,dict) else _leaf(k,v))\n",
" for k,v in data.items()]\n",
" return ({k_name:res} if not title\n",
" else res if title.startswith('empty')\n",
" else {'title': title, 'output':'web', k_name: res})"
]
},
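{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hedged illustration of the mapping (the titles and urls below are made up): a dict of titles to links becomes the `folders`/`folderitems` structure expected by the jekyll sidebar template.\n",
"\n",
"```python\n",
"_side_dict('Sidebar', {'Overview': '/', 'Tutorial': 'tutorial.html'})\n",
"# -> {'title': 'Sidebar', 'output': 'web',\n",
"#     'folders': [{'title': 'Overview', 'url': '/', 'output': 'web,pdf'},\n",
"#                 {'title': 'Tutorial', 'url': 'tutorial.html', 'output': 'web,pdf'}]}\n",
"```"
]
},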
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_catch_title = re.compile('^title\\s*:\\s*(\\S+.*)$', re.MULTILINE)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _get_title(fname):\n",
" \"Grabs the title of html file `fname`\"\n",
" with open(fname, 'r') as f: code = f.read()\n",
" src = _re_catch_title.search(code)\n",
" return fname.stem if src is None else src.groups()[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"test_eq(_get_title(Config().doc_path/'export.html'), \"Export to modules\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def _create_default_sidebar():\n",
" \"Create the default sidebar for the docs website\"\n",
" dic = {\"Overview\": \"/\"}\n",
" files = [f for f in Config().nbs_path.glob('**/*.ipynb') if not f.name.startswith('_')]\n",
" fnames = [_nb2htmlfname(f) for f in sorted(files)]\n",
" titles = [_get_title(f) for f in fnames if 'index' not in f.stem!='index']\n",
" if len(titles) > len(set(titles)): print(f\"Warning: Some of your Notebooks use the same title ({titles}).\")\n",
" dic.update({_get_title(f):f'{f.name}' for f in fnames if f.stem!='index'})\n",
" return dic"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def create_default_sidebar():\n",
" \"Create the default sidebar for the docs website\"\n",
" dic = {Config().lib_name: _create_default_sidebar()}\n",
" json.dump(dic, open(Config().doc_path/'sidebar.json', 'w'), indent=2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The default sidebar lists all html pages with their respective title, except the index that is named \"Overview\". To build a custom sidebar, set the flag `custom_sidebar` in your `settings.ini` to `True` then change the `sidebar.json` file in the `doc_folder` to your liking. Otherwise, the sidebar is updated at each doc build."
]
},
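{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a hedged illustration of the expected `sidebar.json` structure (the titles and file names below are placeholders), a hand-written sidebar maps the library name to a dict of page titles and html files:\n",
"\n",
"```python\n",
"custom_sidebar = {\n",
"    \"nbdev\": {\n",
"        \"Overview\": \"/\",\n",
"        \"Export to modules\": \"export.html\",\n",
"        \"Convert to html\": \"export2html.html\"\n",
"    }\n",
"}\n",
"json.dump(custom_sidebar, open(Config().doc_path/'sidebar.json', 'w'), indent=2)\n",
"```"
]
},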
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def make_sidebar():\n",
" \"Making sidebar for the doc website form the content of `doc_folder/sidebar.json`\"\n",
" cfg = Config()\n",
" if not (cfg.doc_path/'sidebar.json').exists() or cfg.get('custom_sidebar', 'False') == 'False':\n",
" create_default_sidebar()\n",
" sidebar_d = json.load(open(cfg.doc_path/'sidebar.json', 'r'))\n",
" res = _side_dict('Sidebar', sidebar_d)\n",
" res = {'entries': [res]}\n",
" res_s = yaml.dump(res, default_flow_style=False)\n",
" res_s = res_s.replace('- subfolders:', ' subfolders:').replace(' - - ', ' - ')\n",
" res_s = f\"\"\"\n",
"#################################################\n",
"### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###\n",
"#################################################\n",
"# Instead edit {'../../sidebar.json'}\n",
"\"\"\"+res_s\n",
" open(cfg.doc_path/'_data/sidebars/home_sidebar.yml', 'w').write(res_s)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Export-"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Converted 00_export.ipynb.\n",
"Converted 01_sync.ipynb.\n",
"Converted 02_showdoc.ipynb.\n",
"Converted 03_export2html.ipynb.\n",
"Converted 04_test.ipynb.\n",
"Converted 05_merge.ipynb.\n",
"Converted 06_cli.ipynb.\n",
"Converted 07_clean.ipynb.\n",
"Converted 99_search.ipynb.\n",
"Converted index.ipynb.\n",
"Converted tutorial.ipynb.\n"
]
}
],
"source": [
"#hide\n",
"from nbdev.export import *\n",
"notebook2script()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"jupytext": {
"split_at_heading": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}