{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Coffea-Casa Benchmark Example 1" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "%matplotlib inline\n", "from coffea import hist\n", "import coffea.processor as processor\n", "import awkward as ak\n", "from coffea.nanoevents import schemas" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# This program plots an event-level variable (in this case, MET, but switching it is as easy as a dict-key change). It also demonstrates an easy use of the book-keeping cutflow tool, to keep track of the number of events processed.\n", "\n", "# The processor class bundles our data analysis together while giving us some helpful tools. It also leaves looping and chunks to the framework instead of us.\n", "class Processor(processor.ProcessorABC):\n", " def __init__(self):\n", " # Bins and categories for the histogram are defined here. For format, see https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Hist.html && https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Bin.html\n", " dataset_axis = hist.Cat(\"dataset\", \"\")\n", " MET_axis = hist.Bin(\"MET\", \"MET [GeV]\", 50, 0, 100)\n", " \n", " # The accumulator keeps our data chunks together for histogramming. It also gives us cutflow, which can be used to keep track of data.\n", " self._accumulator = processor.dict_accumulator({\n", " 'MET': hist.Hist(\"Counts\", dataset_axis, MET_axis),\n", " 'cutflow': processor.defaultdict_accumulator(int)\n", " })\n", " \n", " @property\n", " def accumulator(self):\n", " return self._accumulator\n", " \n", " def process(self, events):\n", " output = self.accumulator.identity()\n", " \n", " # This is where we do our actual analysis. The dataset has columns similar to the TTree's; events.columns can tell you them, or events.[object].columns for deeper depth.\n", " dataset = events.metadata[\"dataset\"]\n", " MET = events.MET.pt\n", " \n", " # We can define a new key for cutflow (in this case 'all events'). Then we can put values into it. We need += because it's per-chunk (demonstrated below)\n", " output['cutflow']['all events'] += ak.size(MET)\n", " output['cutflow']['number of chunks'] += 1\n", " \n", " # This fills our histogram once our data is collected. The hist key ('MET=') will be defined in the bin in __init__.\n", " output['MET'].fill(dataset=dataset, MET=MET)\n", " return output\n", "\n", " def postprocess(self, accumulator):\n", " return accumulator" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "
\n", "
\n", "

Client

\n", "

Client-e9018bf0-8b2d-11ec-8904-82674678ea37

\n", " \n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", "\n", "
Connection method: Direct
\n", " Dashboard: /user/oksana.shadura@cern.ch/proxy/8787/status\n", "
\n", "\n", " \n", "
\n", "

Scheduler Info

\n", "
\n", "
\n", "
\n", "
\n", "

Scheduler

\n", "

Scheduler-fa970486-b334-458d-93f6-e1fcf5106709

\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
\n", " Comm: tcp://127.0.0.1:36157\n", " \n", " Workers: 1\n", "
\n", " Dashboard: /user/oksana.shadura@cern.ch/proxy/8787/status\n", " \n", " Total threads: 4\n", "
\n", " Started: 2 hours ago\n", " \n", " Total memory: 15.70 GiB\n", "
\n", "
\n", "
\n", "\n", "
\n", " \n", "

Workers

\n", "
\n", "\n", " \n", "
\n", "
\n", "
\n", "
\n", " \n", "

Worker: 3

\n", "
\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "\n", " \n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "\n", "
\n", " Comm: tcp://127.0.0.1:34225\n", " \n", " Total threads: 4\n", "
\n", " Dashboard: /user/oksana.shadura@cern.ch/proxy/46647/status\n", " \n", " Memory: 15.70 GiB\n", "
\n", " Nanny: tcp://127.0.0.1:36243\n", "
\n", " Local directory: /home/cms-jovyan/dask-worker-space/worker-crip7cn_\n", "
\n", " Tasks executing: 0\n", " \n", " Tasks in memory: 1\n", "
\n", " Tasks ready: 0\n", " \n", " Tasks in flight: 0\n", "
\n", " CPU usage: 2.0%\n", " \n", " Last seen: Just now\n", "
\n", " Memory usage: 518.93 MiB\n", " \n", " Spilled bytes: 0 B\n", "
\n", " Read bytes: 41.93 kiB\n", " \n", " Write bytes: 57.77 kiB\n", "
\n", "
\n", "
\n", "
\n", " \n", "\n", "
\n", "
\n", "
\n", " \n", "\n", "
\n", "
" ], "text/plain": [ "" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from dask.distributed import Client\n", "\n", "client = Client(\"tcp://127.0.0.1:36157\")\n", "client" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "import os, shutil\n", "import uproot\n", "import awkward as ak\n", "\n", "if not os.path.isfile(\"/mnt/cephfs/Run2012B_SingleMu/Run2012B_SingleMu.parquet\"):\n", " ak.to_parquet(\n", " uproot.lazy(\"nano_dy.root:Events\"),\n", " \"Run2012B_SingleMu.parquet\",\n", " list_to32=True,\n", " use_dictionary=False,\n", " compression=\"GZIP\",\n", " compression_level=1,\n", " )\n", " \n", "if not os.path.isdir(\"/mnt/cephfs/nanoevents/Run2012B_SingleMu\"):\n", " os.makedirs(\"/mnt/cephfs/nanoevents/Run2012B_SingleMu\")\n", " shutil.copyfile('Run2012B_SingleMu.parquet', '/mnt/cephfs/Run2012B_SingleMu/Run2012B_SingleMu.parquet')" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "ename": "FileNotFoundError", "evalue": "/mnt/cephfs/Run2012B_SingleMu/Run2012B_SingleMu.parquet", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_2308/2740435347.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 8\u001b[0m format=\"parquet\")\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmetrics\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfileset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"Events\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprocessor_instance\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mProcessor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.8/site-packages/coffea/processor/executor.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, fileset, treename, processor_instance)\u001b[0m\n\u001b[1;32m 1344\u001b[0m \u001b[0mevents_total\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mc\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mc\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mchunks\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1345\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1346\u001b[0;31m \u001b[0mchunks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mc\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mc\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mchunks\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1347\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1348\u001b[0m exe_args = {\n", "\u001b[0;32m/opt/conda/lib/python3.8/site-packages/coffea/processor/executor.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 1344\u001b[0m \u001b[0mevents_total\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mc\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mc\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mchunks\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1345\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1346\u001b[0;31m \u001b[0mchunks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mc\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mc\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mchunks\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1347\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1348\u001b[0m exe_args = {\n", "\u001b[0;32m/opt/conda/lib/python3.8/site-packages/coffea/processor/executor.py\u001b[0m in \u001b[0;36m_chunk_generator\u001b[0;34m(self, fileset, treename)\u001b[0m\n\u001b[1;32m 1125\u001b[0m \u001b[0mdataset_filelist_map\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1126\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbasedir\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mfileset\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1127\u001b[0;31m \u001b[0mds_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mds\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbasedir\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mformat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"parquet\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1128\u001b[0m \u001b[0mdataset_filelist_map\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mds_\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfiles\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1129\u001b[0m \u001b[0mchunks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.8/site-packages/pyarrow/dataset.py\u001b[0m in \u001b[0;36mdataset\u001b[0;34m(source, schema, format, filesystem, partitioning, partition_base_dir, exclude_invalid_files, ignore_prefixes)\u001b[0m\n\u001b[1;32m 659\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msource\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 660\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_is_path_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0melem\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0melem\u001b[0m \u001b[0;32min\u001b[0m \u001b[0msource\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 661\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_filesystem_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msource\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 662\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0melem\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0melem\u001b[0m \u001b[0;32min\u001b[0m \u001b[0msource\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 663\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_union_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msource\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.8/site-packages/pyarrow/dataset.py\u001b[0m in \u001b[0;36m_filesystem_dataset\u001b[0;34m(source, schema, filesystem, partitioning, format, partition_base_dir, exclude_invalid_files, selector_ignore_prefixes)\u001b[0m\n\u001b[1;32m 399\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 400\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msource\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mlist\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtuple\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 401\u001b[0;31m \u001b[0mfs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpaths_or_selector\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_ensure_multiple_sources\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msource\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilesystem\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 402\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 403\u001b[0m \u001b[0mfs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpaths_or_selector\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_ensure_single_source\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msource\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilesystem\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/opt/conda/lib/python3.8/site-packages/pyarrow/dataset.py\u001b[0m in \u001b[0;36m_ensure_multiple_sources\u001b[0;34m(paths, filesystem)\u001b[0m\n\u001b[1;32m 318\u001b[0m \u001b[0;32mcontinue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 319\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mfile_type\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mFileType\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNotFound\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 320\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mFileNotFoundError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 321\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mfile_type\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mFileType\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDirectory\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 322\u001b[0m raise IsADirectoryError(\n", "\u001b[0;31mFileNotFoundError\u001b[0m: /mnt/cephfs/Run2012B_SingleMu/Run2012B_SingleMu.parquet" ] }, { "name": "stderr", "output_type": "stream", "text": [ "distributed.client - ERROR - Failed to reconnect to scheduler after 30.00 seconds, closing client\n", "_GatheringFuture exception was never retrieved\n", "future: <_GatheringFuture finished exception=CancelledError()>\n", "asyncio.exceptions.CancelledError\n" ] } ], "source": [ "fileset = {'SingleMu' : [\"/mnt/cephfs/Run2012B_SingleMu/Run2012B_SingleMu.parquet\"]}\n", "\n", "run = processor.Runner(executor=processor.DaskExecutor(client=client),\n", " schema=schemas.NanoAODSchema,\n", " savemetrics=True,\n", " use_skyhook=True,\n", " skyhook_options = {\"ceph_config_path\": \"/opt/ceph/ceph.conf\", \"ceph_data_pool\": \"cephfs_data0\"},\n", " format=\"parquet\")\n", "\n", "output, metrics = run(fileset, \"Events\", processor_instance=Processor())\n", "\n", "metrics" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Generates a 1D histogram from the data output to the 'MET' key. fill_opts are optional, to fill the graph (default is a line).\n", "hist.plot1d(output['MET'], overlay='dataset', fill_opts={'edgecolor': (0,0,0,0.3), 'alpha': 0.8})" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Easy way to print all cutflow dict values. Can just do print(output['cutflow'][\"KEY_NAME\"]) for one.\n", "for key, value in output['cutflow'].items():\n", " print(key, value)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.12" } }, "nbformat": 4, "nbformat_minor": 4 }