{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Compare two versions of an archived web page\n", "\n", "[View in GitHub](https://github.com/GLAM-Workbench/web-archives/blob/master/show_diffs.ipynb) · [View in GLAM Workbench](https://glam-workbench.net/web-archives/#compare-two-versions-of-an-archived-web-page)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# This notebook is designed to run in Voila as an app (with the code hidden).\n", "# To launch this notebook in Voila, just select 'View > Open with Voila in New Browser Tab'\n", "# Your browser might ask for permission to open the new tab as a popup." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This notebook demonstrates a number of different ways of comparing versions of archived web pages. Just choose a repository, enter a url, and select two dates to see comparisons based on:\n", "\n", "" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import base64\n", "import io\n", "import math\n", "import os\n", "import re\n", "import time\n", "from difflib import HtmlDiff\n", "\n", "# from webdriverdownloader import GeckoDriverDownloader\n", "from pathlib import Path\n", "from urllib.parse import parse_qs, quote, urlparse\n", "\n", "import arrow\n", "import geckodriver_autoinstaller\n", "import ipywidgets as widgets\n", "import jsons\n", "import pandas as pd\n", "import PIL\n", "import requests\n", "import selenium\n", "import trafilatura as tf\n", "from bs4 import BeautifulSoup\n", "from IPython.display import HTML, display\n", "from PIL import Image\n", "from selenium import webdriver\n", "from selenium.webdriver.common.by import By\n", "from sklearn.feature_extraction.text import TfidfVectorizer\n", "from slugify import slugify\n", "\n", "# gdd = GeckoDriverDownloader()\n", "# geckodriver = gdd.download_and_install(\"v0.30.0\")[1]\n", "\n", "geckodriver_autoinstaller.install()\n", "\n", "# Add styles for the diff\n", "HTML(\n", " \"\"\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def is_memento(url):\n", " \"\"\"\n", " Is this url a Memento? 
Checks for the presence of a timestamp.\n", " \"\"\"\n", " return bool(re.search(r\"/(\\d{12}|\\d{14})(?:id_|mp_|if_)*/http\", url))\n", "\n", "\n", "def get_timestamp(url):\n", " \"\"\"\n", " Extract the timestamp from a Memento\n", " \"\"\"\n", " return re.search(r\"/(\\d{12}|\\d{14})(?:if_|mp_|id_)*/\", url).group(1)\n", "\n", "\n", "def get_dates(page_data):\n", " \"\"\"\n", " Return formatted dates of the saved pages.\n", " \"\"\"\n", " dates = []\n", " for capture in page_data:\n", " dates.append(format_date_from_timestamp(capture[\"url\"]))\n", " return dates\n", "\n", "\n", "def get_html(url):\n", " \"\"\"\n", " Retrieve the original HTML content of an archived page.\n", " Follow redirects if they go to another archived page.\n", " Return the (possibly redirected) url from the response and the HTML content.\n", " \"\"\"\n", " # Adding the id_ hint tells the archive to give us the original harvested version, without any rewriting.\n", " url = re.sub(r\"/(\\d{12}|\\d{14})(?:mp_)*/http\", r\"/\\1id_/http\", url)\n", " response = requests.get(url, allow_redirects=True)\n", " # Some captures might redirect themselves to live versions\n", " # If the redirected url doesn't look like a Memento rerun this without redirection\n", " if not is_memento(response.url):\n", " response = requests.get(url, allow_redirects=False)\n", " return {\"url\": response.url, \"html\": response.content}\n", "\n", "\n", "def get_all_text(capture_data):\n", " \"\"\"\n", " Get all the human visible text from a web page, including headers, footers, and navigation.\n", " Does some cleaning up to remove multiple spaces, tabs, and newlines.\n", " \"\"\"\n", " try:\n", " text = BeautifulSoup(capture_data[\"html\"]).get_text()\n", " except TypeError:\n", " return None\n", " else:\n", " # Remove multiple newlines\n", " text = re.sub(r\"\\n\\s*\\n\", \"\\n\\n\", text)\n", " # Remove multiple spaces or tabs with a single space\n", " text = re.sub(r\"( |\\t){2,}\", \" \", text)\n", " # Remove leading spaces\n", " text = re.sub(r\"\\n \", \"\\n\", text)\n", " # Remove leading newlines\n", " text = re.sub(r\"^\\n*\", \"\", text)\n", " return text\n", "\n", "\n", "def get_main_text(capture_data):\n", " \"\"\"\n", " Get only the main text from a page, excluding boilerplate and navigation.\n", " \"\"\"\n", " text = tf.extract(capture_data[\"html\"])\n", " return text\n", "\n", "\n", "def load_data(urls):\n", " \"\"\"\n", " Load all the content of the specified urls into the page_data list.\n", " Add in the text and main text.\n", " \"\"\"\n", " global page_data\n", " for i, url in enumerate(urls):\n", " # Get the HTML of the archives page\n", " page_data.append(get_html(url))\n", " for capture in page_data:\n", " # Add the human-readable text\n", " capture[\"text\"] = get_all_text(capture)\n", " # Add the main text\n", " capture[\"main_text\"] = get_main_text(capture)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# METADATA\n", "\n", "\n", "def get_page_metadata(html):\n", " \"\"\"\n", " Get the metadata from a page extracted by Trafilatura\n", " \"\"\"\n", " return jsons.dump(tf.metadata.extract_metadata(html))\n", "\n", "\n", "def get_metadata(page_data):\n", " \"\"\"\n", " Get metadata from all saved pages.\n", " \"\"\"\n", " metadata = []\n", " for capture in page_data:\n", " metadata.append(get_page_metadata(capture[\"html\"]))\n", " return metadata\n", "\n", "\n", "def display_metadata(page_data):\n", " \"\"\"\n", " Display the extracted metadata.\n", " \"\"\"\n", " 
metadata = get_metadata(page_data)\n", " # Get the formatted dates of the two pages\n", " dates = get_dates(page_data)\n", " # Use Pandas to make nice tables, using the dates as index\n", " df = pd.DataFrame(metadata, index=dates)\n", " with md_out:\n", " display(HTML(\"
<h2>Metadata</h2>
\"))\n", " display(df)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# STATISTICS\n", "\n", "\n", "def size_in_bytes(html):\n", " \"\"\"\n", " The HTML should already be a bytes string, so len should give us the number of bytes.\n", " \"\"\"\n", " return len(html)\n", "\n", "\n", "def number_of_words(text):\n", " \"\"\"\n", " Split text on whitespace and count resulting words.\n", " (Might include some punctuation as well.)\n", " \"\"\"\n", " try:\n", " words = len(text.split())\n", " except AttributeError:\n", " words = 0\n", " return words\n", "\n", "\n", "def get_summary_data(capture):\n", " \"\"\"\n", " Compile some summary statistics about a page.\n", " \"\"\"\n", " summary = {\n", " \"File size (bytes)\": size_in_bytes(capture[\"html\"]),\n", " \"Number of words (all text)\": number_of_words(capture[\"text\"]),\n", " \"Number of words (main text)\": number_of_words(capture[\"main_text\"]),\n", " }\n", " return summary\n", "\n", "\n", "def get_summaries(page_data):\n", " \"\"\"\n", " Get summaries of all the saved pages.\n", " \"\"\"\n", " summaries = []\n", " for capture in page_data:\n", " summaries.append(get_summary_data(capture))\n", " return summaries\n", "\n", "\n", "def display_summaries(page_data):\n", " \"\"\"\n", " Display the summaries using Pandas.\n", " \"\"\"\n", " summaries = get_summaries(page_data)\n", " dates = get_dates(page_data)\n", " df = pd.DataFrame(summaries, index=dates)\n", " with stats_out:\n", " display(HTML(\"
<h2>Statistics</h2>
\"))\n", "    # Include thousands separator\n", "    display(df.head().style.format(\"{:,.0f}\"))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# LINKS\n", "\n", "\n", "def link_is_local(site, href):\n", " \"\"\"\n", " Check whether a link is internal or external by looking for the current domain in the href.\n", " \"\"\"\n", " # Relative urls will always be local of course\n", " if href.startswith(\"http\") and site not in href.lower():\n", " return False\n", " else:\n", " return True\n", "\n", "\n", "def get_site_from_url(url):\n", " # Get the current domain from the url (the second capture group; the first is the timestamp)\n", " site = re.search(\n", " r\"(\\d{12}|\\d{14})(?:id_)*/https*://(?:.*@){0,1}(.*?)(?:\\:\\d*){0,1}/\", url\n", " ).group(2)\n", " # Remove any wwws\n", " site = re.sub(r\"^www\\d*\\.\", \"\", site)\n", " return site\n", "\n", "\n", "def get_links_in_page(capture):\n", " \"\"\"\n", " Extract internal and external links from an HTML page.\n", " \"\"\"\n", " internal_links = []\n", " external_links = []\n", " site = get_site_from_url(capture[\"url\"])\n", " soup = BeautifulSoup(capture[\"html\"])\n", " links = soup.find_all(\"a\")\n", " for link in links:\n", " try:\n", " href = link[\"href\"]\n", " except KeyError:\n", " pass\n", " else:\n", " if link_is_local(site, href):\n", " if href not in internal_links:\n", " internal_links.append(href)\n", " else:\n", " if href not in external_links:\n", " external_links.append(href)\n", " return {\"internal\": internal_links, \"external\": external_links}\n", "\n", "\n", "def get_links(page_data):\n", " \"\"\"\n", " Extract link info from all saved pages.\n", " \"\"\"\n", " all_links = []\n", " for capture in page_data:\n", " links = get_links_in_page(capture)\n", " all_links.append(links)\n", " return all_links\n", "\n", "\n", "def display_link_data(dates, all_links):\n", " \"\"\"\n", " Display the number of links in saved pages.\n", " \"\"\"\n", " totals = []\n", " for links in all_links:\n", " totals.append(\n", " {\n", " \"Total internal links\": len(links[\"internal\"]),\n", " \"Total external links\": len(links[\"external\"]),\n", " }\n", " )\n", " df = pd.DataFrame(totals, index=dates)\n", " display(df)\n", "\n", "\n", "def make_clickable(val):\n", " \"\"\"\n", " Make the value of a Pandas cell into a clickable link.\n", " \"\"\"\n", " return f'<a href=\"{val}\">{val}</a>' if val is not None else \"\"\n", "\n", "\n", "def list_external_links(dates, all_links):\n", " \"\"\"\n", " Display a list of external links using Pandas.\n", " \"\"\"\n", " # Put links into a dataframe, then transpose to make dates into columns\n", " df = pd.DataFrame([link[\"external\"] for link in all_links], index=dates).T\n", " # Make links clickable and align left\n", " df_styler = df.style.format(make_clickable).set_properties(**{\"text-align\": \"left\"})\n", " # Make the headers left aligned as well\n", " df_styler.set_table_styles([dict(selector=\"th\", props=[(\"text-align\", \"left\")])])\n", " # Display without the index\n", " display(df_styler.hide(axis=\"index\"))\n", "\n", "\n", "def display_links(page_data):\n", " \"\"\"\n", " Extract and display information about links in the saved pages.\n", " \"\"\"\n", " all_links = get_links(page_data)\n", " dates = get_dates(page_data)\n", " with links_out:\n", " display(HTML(\"
<h2>Links</h2>
\"))\n", " display_link_data(dates, all_links)\n", " display(HTML(\"
<h4>External links</h4>
\"))\n", " list_external_links(dates, all_links)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# SIMILARITY\n", "\n", "\n", "def calculate_similarity(text1, text2):\n", " \"\"\"\n", " Calculate cosine similarity of two texts.\n", " \"\"\"\n", " try:\n", " tfidf = TfidfVectorizer(min_df=1).fit_transform([text1, text2])\n", " except AttributeError:\n", " return None\n", " return (tfidf * tfidf.T).A[0][1]\n", "\n", "\n", "def calculate_similarities(page_data):\n", " \"\"\"\n", " Calculate cosine similarities for all the text, and the main text only, of the saved pages.\n", " \"\"\"\n", " similarities = {\n", " \"All text\": calculate_similarity(page_data[0][\"text\"], page_data[1][\"text\"]),\n", " \"Main text\": calculate_similarity(\n", " page_data[0][\"main_text\"], page_data[1][\"main_text\"]\n", " ),\n", " }\n", " return similarities\n", "\n", "\n", "def display_similarities(page_data):\n", " \"\"\"\n", " Display the similarity values.\n", " \"\"\"\n", " similarities = calculate_similarities(page_data)\n", " df = pd.DataFrame([similarities], index=[\"Cosine similarity\"]).T\n", " with sim_out:\n", " display(HTML(\"
<h2>Cosine similarity</h2>
\"))\n", " display(df)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# DIFFERENCES\n", "\n", "\n", "def process_text(capture, include=\"text\"):\n", " \"\"\"\n", " Prepare extracted text for diffing by splitting it into lines and removing any blank lines.\n", " \"\"\"\n", " if include == \"text\":\n", " lines = [\n", " line.strip()\n", " for line in BeautifulSoup(capture[\"html\"]).get_text().splitlines()\n", " if not re.match(r\"^\\s*$\", line)\n", " ]\n", " # lines = capture['text'].splitlines()\n", " elif include == \"main_text\":\n", " lines = capture[\"main_text\"].splitlines()\n", " else:\n", " lines = [line.decode() for line in capture[\"html\"].splitlines()]\n", " return lines\n", "\n", "\n", "def format_date_link(url):\n", " date = format_date_from_timestamp(url)\n", " return f'<a href=\"{url}\">{date}</a>'\n", "\n", "\n", "def show_line_differences(page_data, include=\"text\", context=True, numlines=0):\n", " \"\"\"\n", " Use difflib to show a side-by-side comparison of the text in two web pages.\n", " \"\"\"\n", " differ = HtmlDiff()\n", " doc1 = process_text(page_data[0], include=include)\n", " doc2 = process_text(page_data[1], include=include)\n", " date1 = format_date_link(page_data[0][\"url\"])\n", " date2 = format_date_link(page_data[1][\"url\"])\n", " html = differ.make_table(\n", " doc1, doc2, context=context, numlines=numlines, fromdesc=date1, todesc=date2\n", " )\n", " # Rewrite the table html to make the column widths work better\n", " # (difflib marks content cells as nowrap, which stops long lines from wrapping)\n", " html = html.replace(\n", " r'<td nowrap=\"nowrap\">', '<td style=\"white-space: pre-wrap; width: 45%;\">'\n", " ).replace('<table class=\"diff\"', '<table class=\"diff\" style=\"table-layout: fixed; width: 100%;\"')\n", " with diff_out:\n", " display(HTML(html))\n", "\n", "\n", "def display_diff(e):\n", " \"\"\"\n", " Update the diff display when the dropdown selections change.\n", " \"\"\"\n", " diff_out.clear_output(wait=True)\n", " which_text.observe(display_diff)\n", " what_context.observe(display_diff)\n", " with diff_out:\n", " display(HTML(\"
<h2>Differences by line</h2>
\"))\n", " display(widgets.HBox([which_text, what_context]))\n", " show_line_differences(\n", " page_data, include=which_text.value, context=what_context.value\n", " )\n", "\n", "\n", "which_text = widgets.Dropdown(\n", " options=[\n", " (\"All text\", \"text\"),\n", " (\"Main text\", \"main_text\"),\n", " (\"Complete html\", \"html\"),\n", " ],\n", " description=\"Compare:\",\n", " disabled=False,\n", ")\n", "\n", "what_context = widgets.Dropdown(\n", " options=[(\"Just changes\", True), (\"Complete context\", False)],\n", " description=\"Context:\",\n", " disabled=False,\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# SCREENSHOTS\n", "\n", "wayback = [\"ndhadeliver.natlib.govt.nz\", \"web.archive.org\"]\n", "pywb = {\n", " \"web.archive.org.au\": \"replayFrame\",\n", " \"webarchive.nla.gov.au\": \"replayFrame\",\n", " \"webarchive.org.uk\": \"replay_iframe\",\n", " \"webarchive.nationalarchives.gov.uk\" : \"replay_iframe\",\n", "}\n", "\n", "\n", "def get_full_page_screenshot(url, save_width=200):\n", " \"\"\"\n", " Gets a full page screenshot of the supplied url.\n", " By default resizes the screenshot to a maximum width of 200px.\n", " Provide a 'save_width' value to change this.\n", "\n", " NOTE the webdriver sometimes fails for unknown reasons. Just try again.\n", " \"\"\"\n", " domain = urlparse(url)[1].replace(\"www.\", \"\")\n", " # NZ and IA inject content into the page, so we use if_ to get the original page (with rewritten urls)\n", " if domain in wayback and \"if_\" not in url:\n", " url = re.sub(r\"/(\\d{12}|\\d{14})/http\", r\"/\\1if_/http\", url)\n", " try:\n", " date_str, site = re.search(\n", " r\"/(\\d{12}|\\d{14})(?:if_|mp_)*/https*://(.+/)\", url\n", " ).groups()\n", " except AttributeError:\n", " # There's something wrong with the link...\n", " # print(url)\n", " show_error(f\"{url} isn't a Memento – did you forget to select an archive?\")\n", " else:\n", " output_dir = Path(\"screenshots\")\n", " output_dir.mkdir(parents=True, exist_ok=True)\n", " ss_file = Path(output_dir, f\"{slugify(site)}-{date_str}-{save_width}.png\")\n", " options = webdriver.FirefoxOptions()\n", " options.headless = True\n", " driver = webdriver.Firefox(options=options)\n", " driver.implicitly_wait(15)\n", " driver.get(url)\n", " # Give some time for everything to load\n", " time.sleep(30)\n", " driver.maximize_window()\n", " # UK and AU use pywb in framed replay mode, so we need to switch to the framed content\n", " if domain in pywb:\n", " try:\n", " driver.switch_to.frame(pywb[domain])\n", " except selenium.common.exceptions.NoSuchFrameException:\n", " # If we pass here we'll probably still get a ss, just not full page -- better than failing?\n", " pass\n", " ss = None\n", " for tag in [\"body\", \"html\", \"frameset\"]:\n", " try:\n", " elem = driver.find_element(By.TAG_NAME, tag)\n", " ss = elem.screenshot_as_base64\n", " break\n", " except (\n", " selenium.common.exceptions.NoSuchElementException,\n", " selenium.common.exceptions.WebDriverException,\n", " ):\n", " pass\n", " driver.quit()\n", " if not ss:\n", " show_error(f\"Couldn't get a screenshot of {url} – sorry...\")\n", " else:\n", " img = Image.open(io.BytesIO(base64.b64decode(ss)))\n", " ratio = save_width / img.width\n", " (width, height) = (save_width, math.ceil(img.height * ratio))\n", " resized_img = img.resize((width, height), PIL.Image.Resampling.LANCZOS)\n", " resized_img.save(ss_file)\n", " return ss_file\n", "\n", "\n", "status = widgets.Output()\n", "\n", "\n", 
"def display_screenshots(urls):\n", " \"\"\"\n", " Generate full page screenshots of the saved pages and display them side by side.\n", " \"\"\"\n", " html_output = []\n", " with ss_out:\n", " display(HTML(\"
<h2>Screenshots</h2>
\"))\n", " display(status)\n", " for url in urls:\n", " with status:\n", " print(\"Generating screenshot...\")\n", " try:\n", " ss_file = get_full_page_screenshot(url, save_width=350)\n", " if ss_file:\n", " date = format_date_from_timestamp(url)\n", " try:\n", " # Strip the timestamp prefix so only the original url is displayed\n", " display_url = re.search(\n", " r\"/(\\d{12}|\\d{14})(?:mp_|if_|id_)*/(.*)$\", url\n", " ).group(2)\n", " except AttributeError:\n", " display_url = url\n", " html_output.append(\n", " f'
<div style=\"float: left; margin-right: 40px; margin-bottom: 20px;\"><p><b>{date}</b><br>{display_url}</p><p><a href=\"{ss_file}\"><img src=\"{ss_file}\"></a></p><p><a href=\"{ss_file}\" download>[Download]</a></p></div>
'\n", " )\n", " status.clear_output()\n", " ss_out.clear_output(wait=True)\n", " with ss_out:\n", " display(HTML(\"
<h2>Screenshots</h2>
\"))\n", " display(status)\n", " display((HTML(\"\".join(html_output))))\n", " except selenium.common.exceptions.WebDriverException:\n", " show_error(f\"couldn't get a screenshot of {url} – sorry...\")\n", "\n", "\n", "def show_error(message=None):\n", " status.clear_output()\n", " with status:\n", " print(f\"Something went wrong – {message}\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# USER INTERFACE\n", "\n", "page_data = []\n", "\n", "TIMEGATES = {\n", " \"nla\": \"https://web.archive.org.au/awa/\",\n", " \"nlnz\": \"https://ndhadeliver.natlib.govt.nz/webarchive/wayback/\",\n", " \"bl\": \"https://www.webarchive.org.uk/wayback/archive/\",\n", " \"ia\": \"https://web.archive.org/web/\",\n", " \"ukgwa\": \"https://webarchive.nationalarchives.gov.uk/ukgwa/\"\n", "}\n", "\n", "\n", "def format_date_for_headers(iso_date, tz):\n", " \"\"\"\n", " Convert an ISO date (YYYY-MM-DD) to a datetime at noon in the specified timezone.\n", " Convert the datetime to UTC and format as required by Accet-Datetime headers:\n", " eg Fri, 23 Mar 2007 01:00:00 GMT\n", " \"\"\"\n", " local = arrow.get(f\"{iso_date} 12:00:00 {tz}\", \"YYYY-MM-DD HH:mm:ss ZZZ\")\n", " gmt = local.to(\"utc\")\n", " return f'{gmt.format(\"ddd, DD MMM YYYY HH:mm:ss\")} GMT'\n", "\n", "\n", "def format_date_from_timestamp(url):\n", " timestamp = re.search(r\"/(\\d{12}|\\d{14})(?:if_|mp_|id_)*/\", url).group(1)\n", " return arrow.get(timestamp, \"YYYYMMDDHHmmss\").format(\"D MMMM YYYY\")\n", "\n", "\n", "def parse_links_from_headers(response):\n", " \"\"\"\n", " Extract original, timegate, timemap, and memento links from 'Link' header.\n", " \"\"\"\n", " links = response.links\n", " return {k: v[\"url\"] for k, v in links.items()}\n", "\n", "\n", "def query_timegate(timegate, url, date=None, tz=\"Australia/Canberra\"):\n", " \"\"\"\n", " Query the specified repository for a Memento.\n", " \"\"\"\n", " headers = {}\n", " if date:\n", " formatted_date = format_date_for_headers(date, tz)\n", " headers[\"Accept-Datetime\"] = formatted_date\n", " # BL,NLNZ & UKGWA don't seem to default to latest date if no date supplied\n", " elif not date and timegate in [\"bl\", \"nlnz\", \"ukgwa\"]:\n", " formatted_date = format_date_for_headers(\n", " arrow.utcnow().format(\"YYYY-MM-DD\"), tz\n", " )\n", " headers[\"Accept-Datetime\"] = formatted_date\n", " # Note that you don't get a timegate response if you leave off the trailing slash, but extras don't hurt!\n", " tg_url = (\n", " f\"{TIMEGATES[timegate]}{url}/\"\n", " if not url.endswith(\"/\")\n", " else f\"{TIMEGATES[timegate]}{url}\"\n", " )\n", " # print(tg_url)\n", " # IA only works if redirects are followed -- this defaults to False with HEAD requests...\n", " if timegate == \"ia\":\n", " allow_redirects = True\n", " else:\n", " allow_redirects = False\n", " response = requests.head(tg_url, headers=headers, allow_redirects=allow_redirects)\n", " return parse_links_from_headers(response)\n", "\n", "\n", "def get_memento(timegate, url, date=None, tz=\"Australia/Canberra\"):\n", " \"\"\"\n", " If there's no memento in the results, look for an alternative.\n", " \"\"\"\n", " links = query_timegate(timegate, url, date, tz)\n", " # NLNZ doesn't always seem to return a Memento, so we'll build in some fuzziness\n", " if links:\n", " if \"memento\" in links:\n", " memento = links[\"memento\"]\n", " elif \"prev memento\" in links:\n", " memento = links[\"prev memento\"]\n", " elif \"next memento\" in links:\n", " memento = links[\"next 
memento\"]\n", " elif \"last memento\" in links:\n", " memento = links[\"last memento\"]\n", " else:\n", " memento = None\n", " return memento\n", "\n", "\n", "def get_mementos():\n", " mementos = [\n", " get_memento(repository.value, target_url.value, first_date.value),\n", " get_memento(repository.value, target_url.value, second_date.value),\n", " ]\n", " return mementos\n", "\n", "\n", "def share_this(urls):\n", " binder_url = \"https://mybinder.org/v2/gh/GLAM-Workbench/web-archives/master?urlpath=/voila/render/show_diffs.ipynb\"\n", " parameter_string = quote(f\"?url1={urls[0]}&url2={urls[1]}\")\n", " share_url = f\"{binder_url}{parameter_string}\"\n", " with share_out:\n", " display(HTML(f'
<p>Share this: <a href=\"{share_url}\">{share_url}</a></p>
'))\n", "\n", "\n", "def clear(e):\n", " global page_data\n", " page_data = []\n", " md_out.clear_output()\n", " stats_out.clear_output()\n", " links_out.clear_output()\n", " sim_out.clear_output()\n", " diff_out.clear_output()\n", " ss_out.clear_output()\n", " share_out.clear_output()\n", "\n", "\n", "def start(e):\n", " clear(\"e\")\n", " if url1 and url2:\n", " urls = [url1, url2]\n", " else:\n", " urls = get_mementos()\n", " load_data(urls)\n", " display_metadata(page_data)\n", " display_summaries(page_data)\n", " display_links(page_data)\n", " display_similarities(page_data)\n", " display_diff(\"e\")\n", " display_screenshots(urls)\n", " share_this(urls)\n", "\n", "\n", "def display_mementos(url1, url2, start_button):\n", " memento1 = widgets.Text(value=url1, layout=widgets.Layout(width=\"400px\"))\n", " memento2 = widgets.Text(value=url2, layout=widgets.Layout(width=\"400px\"))\n", " with options_out:\n", " display(\n", " widgets.HBox(\n", " [\n", " widgets.VBox(\n", " [\n", " widgets.Label(\"First memento:\"),\n", " widgets.Label(\"Second memento:\"),\n", " ]\n", " ),\n", " widgets.VBox([memento1, memento2, start_button]),\n", " ],\n", " layout=widgets.Layout(padding=\"20px\"),\n", " )\n", " )\n", "\n", "\n", "options_out = widgets.Output()\n", "md_out = widgets.Output()\n", "stats_out = widgets.Output()\n", "links_out = widgets.Output()\n", "sim_out = widgets.Output()\n", "diff_out = widgets.Output()\n", "ss_out = widgets.Output()\n", "share_out = widgets.Output()\n", "\n", "start_button = widgets.Button(description=\"Start\", button_style=\"primary\")\n", "start_button.on_click(start)\n", "\n", "query_string = os.environ.get(\"QUERY_STRING\", \"\")\n", "parameters = parse_qs(query_string)\n", "url1 = parameters.get(\"url1\", [\"\"])[0]\n", "url2 = parameters.get(\"url2\", [\"\"])[0]\n", "\n", "if url1 and url1:\n", " display_mementos(url1, url2, start_button)\n", "else:\n", " repository = widgets.Dropdown(\n", " options=[\n", " (\"---\", \"\"),\n", " (\"UK Web Archive\", \"bl\"),\n", " (\"UK Government Web Archive\", \"ukgwa\"),\n", " (\"National Library of Australia\", \"nla\"),\n", " (\"National Library of New Zealand\", \"nlnz\"),\n", " (\"Internet Archive\", \"ia\"),\n", " ],\n", " description=\"Archive:\",\n", " disabled=False,\n", " )\n", "\n", " target_url = widgets.Text(description=\"Target URL:\")\n", "\n", " first_date = widgets.DatePicker(description=\"Date 1: \", disabled=False)\n", "\n", " second_date = widgets.DatePicker(description=\"Date 2: \", disabled=False)\n", " with options_out:\n", " display(\n", " widgets.HBox(\n", " [\n", " widgets.VBox([repository, first_date]),\n", " widgets.VBox([target_url, second_date]),\n", " ],\n", " layout=widgets.Layout(padding=\"20px\"),\n", " ),\n", " widgets.HBox([start_button]),\n", " )\n", "display(options_out, md_out, stats_out, links_out, sim_out, diff_out, ss_out, share_out)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%%capture\n", "%load_ext dotenv\n", "%dotenv" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Insert some values for automated testing\n", "\n", "if os.getenv(\"GW_STATUS\") == \"dev\":\n", " options_out.clear_output()\n", " url1 = \"https://web.archive.org.au/awa/19981206012233mp_/http://www.discontents.com.au:80/\"\n", " url2 = (\n", " \"https://web.archive.org.au/awa/20100209041537mp_/http://discontents.com.au:80/\"\n", " )\n", " display_mementos(url1, url2, start_button)" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": {}, "outputs": [], "source": [ "# If values have been provided via url or above, then start automatically.\n", "# Note that Voila widgets don't load immediately, hence the polling to\n", "# make sure the start button exists.\n", "\n", "if url1 and url2:\n", " script = \"\"\"\n", " <script type=\"text/javascript\">\n", " // Keep polling for the Start button (ipywidgets gives 'primary' buttons the 'mod-primary' class)\n", " // and click it once Voila has rendered the widgets.\n", " function click_start() {\n", " var button = document.querySelector(\"button.mod-primary\");\n", " if (button) {\n", " button.click();\n", " } else {\n", " setTimeout(click_start, 500);\n", " }\n", " }\n", " click_start();\n", " </script>\n", " \"\"\"\n", " display(HTML(script))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "Created by [Tim Sherratt](https://timsherratt.org) for the [GLAM Workbench](https://glam-workbench.github.io). Support me by becoming a [GitHub sponsor](https://github.com/sponsors/wragge)!\n", "\n", "Work on this notebook was supported by the [IIPC Discretionary Funding Programme 2019-2020](http://netpreserve.org/projects/).\n", "\n", "The Web Archives section of the GLAM Workbench is sponsored by the [British Library](https://www.bl.uk/)." ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": {}, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 4 }