{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Leakage characterization using GST\n", "This tutorial demonstrates how to perform GST on a \"leaky-qubit\" described by a 3-level (instead of the desired 2-level) system. " ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pygsti\n", "import pygsti.construction as pc\n", "import pygsti.construction.std1Q_XYI as std1Q\n", "import numpy as np\n", "import scipy.linalg as sla\n", "#import pickle" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def to_3level_unitary(U_2level):\n", " U_3level = np.zeros((3,3),complex)\n", " U_3level[0:2,0:2] = U_2level\n", " U_3level[2,2] = 1.0\n", " return U_3level\n", "\n", "def unitary_to_gmgate(U):\n", " return pygsti.tools.change_basis( \n", " pygsti.tools.unitary_to_process_mx(U), 'std','gm')\n", "\n", "def state_to_gmvec(state):\n", " pygsti.tools.stdmx_to_gmvec\n", "\n", "Us = pygsti.tools.internalgates.get_standard_gatename_unitaries()" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "mdl_2level_ideal = std1Q.target_model()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "rho0 = np.array( [[1,0,0],\n", " [0,0,0],\n", " [0,0,0]], complex)\n", "E0 = rho0\n", "E1 = np.array( [[0,0,0],\n", " [0,1,0],\n", " [0,0,1]], complex)\n", "\n", "sslbls = pygsti.obj.StateSpaceLabels(['Qubit+Leakage'],[3])\n", "mdl_3level_ideal = pygsti.obj.ExplicitOpModel(sslbls, 'gm')\n", "mdl_3level_ideal['rho0'] = pygsti.tools.stdmx_to_gmvec(rho0)\n", "mdl_3level_ideal['Mdefault'] = pygsti.obj.TPPOVM([('0',pygsti.tools.stdmx_to_gmvec(E0)),\n", " ('1',pygsti.tools.stdmx_to_gmvec(E1))])\n", "\n", "mdl_3level_ideal['Gi'] = unitary_to_gmgate( to_3level_unitary(Us['Gi']))\n", "mdl_3level_ideal['Gx'] = unitary_to_gmgate( to_3level_unitary(Us['Gxpi2']))\n", "mdl_3level_ideal['Gy'] = unitary_to_gmgate( to_3level_unitary(Us['Gypi2']))" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "sigmaX = np.array([[0,1],[1,0]],complex)\n", "rot = sla.expm(1j * 0.1 * sigmaX)\n", "Uleakage = np.identity(3,complex)\n", "Uleakage[1:3,1:3] = rot\n", "leakageOp = unitary_to_gmgate(Uleakage)\n", "#print(Uleakage)\n", "\n", "#Guess of a model w/just unitary leakage\n", "mdl_3level_guess = mdl_3level_ideal.copy()\n", "mdl_3level_guess['Gi'] = np.dot(leakageOp, mdl_3level_guess['Gi'])\n", "#mdl_3level_guess['Gx'] = np.dot(leakageOp, mdl_3level_guess['Gx'])\n", "#mdl_3level_guess['Gy'] = np.dot(leakageOp, mdl_3level_guess['Gy'])\n", "\n", "#Actual model used for data generation (some depolarization too)\n", "mdl_3level_noisy = mdl_3level_ideal.depolarize(op_noise=0.005, spam_noise=0.01)\n", "mdl_3level_noisy['Gi'] = np.dot(leakageOp, mdl_3level_noisy['Gi'])\n", "#mdl_3level_noisy['Gx'] = np.dot(leakageOp, mdl_3level_noisy['Gx'])\n", "#mdl_3level_noisy['Gy'] = np.dot(leakageOp, mdl_3level_noisy['Gy'])" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "#print(mdl_3level_guess)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "# get sequences using expected model\n", "generate_fiducials = False\n", "\n", "if generate_fiducials:\n", " prepfids, measfids = pygsti.algorithms.generate_fiducials(\n", " mdl_3level_guess, omitIdentity=False, maxFidLength=4, verbosity=4)\n", " 
pygsti.io.write_circuit_list(\"example_files/leakage_prepfids.txt\", prepfids)\n", " pygsti.io.write_circuit_list(\"example_files/leakage_measfids.txt\", measfids)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "prepfids = pygsti.io.load_circuit_list(\"example_files/leakage_prepfids.txt\")\n", "measfids = pygsti.io.load_circuit_list(\"example_files/leakage_measfids.txt\")\n", "germs = std1Q.germs\n", "maxLengths = [1,]\n", "expList = pc.make_lsgst_experiment_list(mdl_3level_noisy, prepfids, measfids, germs, maxLengths)\n", "ds = pc.generate_fake_data(mdl_3level_noisy, expList, 1000, 'binomial', seed=1234)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "-- Std Practice: Iter 1 of 1 (CPTP) --: \n", " --- Circuit Creation ---\n", " 305 sequences created\n", " Dataset has 305 entries: 305 utilized, 0 requested sequences were missing\n", " --- Iterative MLGST: Iter 1 of 1 305 operation sequences ---: \n", " --- Minimum Chi^2 GST ---\n", " Sum of Chi^2 = 794.557 (305 data params - 31 model params = expected mean of 274; p-value = 0)\n", " Completed in 5.4s\n", " 2*Delta(log(L)) = 804.647\n", " Iteration 1 took 5.4s\n", " \n", " Switching to ML objective (last iteration)\n", " --- MLGST ---\n", " Maximum log(L) = 395.179 below upper bound of -458095\n", " 2*Delta(log(L)) = 790.358 (305 data params - 31 model params = expected mean of 274; p-value = 0)\n", " Completed in 1.1s\n", " 2*Delta(log(L)) = 790.358\n", " Final MLGST took 1.1s\n", " \n", " Iterative MLGST Total Time: 6.5s\n", " --- Re-optimizing logl after robust data scaling ---\n", " --- MLGST ---\n", " Maximum log(L) = 395.179 below upper bound of -458095\n", " 2*Delta(log(L)) = 790.357 (305 data params - 31 model params = expected mean of 274; p-value = 0)\n", " Completed in 0.5s\n", " -- Performing 'single' gauge optimization on CPTP estimate --\n", " -- Performing 'single' gauge optimization on CPTP.Robust+ estimate --\n", "-- Std Practice: Iter 1 of 2 (CPTP) --: \n", " --- Circuit Creation ---\n", " 305 sequences created\n", " Dataset has 305 entries: 305 utilized, 0 requested sequences were missing\n", " --- Iterative MLGST: Iter 1 of 1 305 operation sequences ---: \n", " --- Minimum Chi^2 GST ---\n", " Created evaluation tree with 1 subtrees. 
Will divide 1 procs into 1 (subtree-processing)\n", " groups of ~1 procs each, to distribute over 360 params (taken as 1 param groups of ~360 params).\n", " --- Outer Iter 0: norm_f = 1.57682e+06, mu=0, |J|=2278.38\n", " --- Outer Iter 1: norm_f = 43698, mu=428.568, |J|=14254\n", " --- Outer Iter 2: norm_f = 5115.05, mu=194.529, |J|=2388.29\n", " --- Outer Iter 3: norm_f = 4310.77, mu=250.155, |J|=2062.3\n", " --- Outer Iter 4: norm_f = 626.091, mu=83.385, |J|=2319.04\n", " --- Outer Iter 5: norm_f = 342.581, mu=526.228, |J|=2351.88\n", " --- Outer Iter 6: norm_f = 276.286, mu=216.687, |J|=2390.09\n", " --- Outer Iter 7: norm_f = 271.075, mu=863.721, |J|=2399.36\n", " Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 0.01\n", " Sum of Chi^2 = 271.075 (305 data params - 161 model params = expected mean of 144; p-value = 7.91906e-10)\n", " Completed in 17.6s\n", " 2*Delta(log(L)) = 273.05\n", " Iteration 1 took 17.6s\n", " \n", " Switching to ML objective (last iteration)\n", " --- MLGST ---\n", " --- Outer Iter 0: norm_f = 136.525, mu=0, |J|=1697.85\n", " Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 0.01\n", " Maximum log(L) = 136.525 below upper bound of -458095\n", " 2*Delta(log(L)) = 273.05 (305 data params - 161 model params = expected mean of 144; p-value = 4.90231e-10)\n", " Completed in 1.3s\n", " 2*Delta(log(L)) = 273.05\n", " Final MLGST took 1.3s\n", " \n", " Iterative MLGST Total Time: 18.9s\n", " --- Re-optimizing logl after robust data scaling ---\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "\n", "WARNING: MLGST failed to improve logl: retaining chi2-objective estimate\n", "/Users/enielse/research/pyGSTi/packages/pygsti/objects/estimate.py:531: UserWarning:\n", "\n", "Max-model params (305) <= model params (360)! 
Using k == 1.\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ " --- MLGST ---\n", " --- Outer Iter 0: norm_f = 136.525, mu=0, |J|=1697.85\n", " Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 0.01\n", " Maximum log(L) = 136.525 below upper bound of -458095\n", " 2*Delta(log(L)) = 273.05 (305 data params - 161 model params = expected mean of 144; p-value = 4.90231e-10)\n", " Completed in 1.2s\n", " -- Performing 'single' gauge optimization on CPTP estimate --\n", " -- Adding Gauge Optimized (single) --\n", " -- Conveying 'single' gauge optimization to CPTP.Robust+ estimate --\n", " -- Adding Gauge Optimized (single) --\n", "-- Std Practice: Iter 2 of 2 (True) --: \n", " --- Circuit Creation ---\n", " 305 sequences created\n", " Dataset has 305 entries: 305 utilized, 0 requested sequences were missing\n", " -- Performing 'single' gauge optimization on True estimate --\n", " -- Adding Gauge Optimized (single) --\n" ] } ], "source": [ "results_2level = pygsti.do_stdpractice_gst(ds, mdl_2level_ideal, prepfids, measfids,\n", " germs, maxLengths, modes=\"CPTP\", verbosity=3)\n", "results_3level = pygsti.do_stdpractice_gst(ds, mdl_3level_ideal, prepfids, measfids,\n", " germs, maxLengths, modes=\"CPTP,True\",\n", " modelsToTest={'True': mdl_3level_noisy}, \n", " verbosity=4, advancedOptions={'all': {'tolerance': 1e-2}})" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "*** Creating workspace ***\n", "*** Generating switchboard ***\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/enielse/research/pyGSTi/packages/pygsti/report/factory.py:785: UserWarning:\n", "\n", "Idle tomography failed:\n", "Label{layers}\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Found standard clifford compilation from std1Q_XYI\n", "*** Generating tables ***\n", "*** Generating plots ***\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "*** Merging into template file ***\n", "Output written to example_files/leakage_report directory\n", "*** Report Generation Complete! 
Total time 32.8807s ***\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pygsti.report.create_standard_report({'two-level': results_2level, 'three-level': results_3level},\n", " \"example_files/leakage_report\", \"Leakage Example Report\")" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "#try a different basis:\n", "gm_basis = pygsti.obj.Basis('gm',3)\n", " \n", "leakage_basis_mxs = [ np.sqrt(2)/3*(np.sqrt(3)*gm_basis[0] + 0.5*np.sqrt(6)*gm_basis[8]),\n", " gm_basis[1], gm_basis[4], gm_basis[7],\n", " gm_basis[2], gm_basis[3], gm_basis[5], gm_basis[6],\n", " 1/3*(np.sqrt(3)*gm_basis[0] - np.sqrt(6)*gm_basis[8]) ]\n", "#for mx in leakage_basis_mxs:\n", "# pygsti.tools.print_mx(mx)\n", "\n", "check = np.zeros( (9,9), complex)\n", "for i,m1 in enumerate(leakage_basis_mxs):\n", " for j,m2 in enumerate(leakage_basis_mxs):\n", " check[i,j] = np.trace(np.dot(m1,m2))\n", "assert(np.allclose(check, np.identity(9,complex)))\n", "\n", "leakage_basis = pygsti.obj.Basis(name=\"LeakageBasis\", matrices=leakage_basis_mxs, \n", " longname=\"2+1 level leakage basis\", real=True,\n", " labels=['I','X','Y','Z','LX0','LX1','LY0','LY1','L'])\n", "\n", "def changebasis_3level_model(mdl):\n", " new_mdl = mdl.copy()\n", " new_mdl.preps['rho0'] = pygsti.obj.FullSPAMVec(\n", " pygsti.tools.change_basis(mdl.preps['rho0'].todense(), gm_basis, leakage_basis))\n", " new_mdl.povms['Mdefault'] = pygsti.obj.UnconstrainedPOVM(\n", " [('0', pygsti.tools.change_basis(mdl.povms['Mdefault']['0'].todense(), gm_basis, leakage_basis)),\n", " ('1', pygsti.tools.change_basis(mdl.povms['Mdefault']['1'].todense(), gm_basis, leakage_basis))])\n", " \n", " for lbl,op in mdl.operations.items():\n", " new_mdl.operations[lbl] = pygsti.obj.FullDenseOp(\n", " pygsti.tools.change_basis(op.todense(), gm_basis, leakage_basis))\n", " new_mdl.basis = leakage_basis\n", " return new_mdl\n", "\n", "def changebasis_3level_results(results):\n", " new_results = results.copy()\n", " for estlbl,est in results.estimates.items():\n", " for mlbl,mdl in est.models.items():\n", " if isinstance(mdl,(list,tuple)): #assume a list/tuple of models\n", " new_results.estimates[estlbl].models[mlbl] = \\\n", " [ changebasis_3level_model(m) for m in mdl ]\n", " else:\n", " new_results.estimates[estlbl].models[mlbl] = changebasis_3level_model(mdl)\n", " return new_results\n", " " ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "results_3level_leakage_basis = changebasis_3level_results( results_3level ) " ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "*** Creating workspace ***\n", "*** Generating switchboard ***\n", "Found standard clifford compilation from std1Q_XYI\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/enielse/research/pyGSTi/packages/pygsti/report/factory.py:785: UserWarning:\n", "\n", "Idle tomography failed:\n", "Label{layers}\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "*** Generating tables ***\n", "*** Generating plots ***\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", 
"Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "*** Merging into template file ***\n", "Output written to example_files/leakage_report directory\n", "*** Report Generation Complete! Total time 30.2682s ***\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pygsti.report.create_standard_report({'two-level': results_2level, 'three-level': results_3level_leakage_basis},\n", " \"example_files/leakage_report\", \"Leakage Example Report\")\n", " #advancedOptions={'autosize': 'none'})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Open the report [here](example_files/leakage_report/main.html)" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [], "source": [ "# use \"kite\" density-matrix structure\n", "def to_2plus1_superop(superop_2level):\n", " ret = np.zeros((5,5),'d')\n", " ret[0:4,0:4] = superop_2level\n", " ret[4,4] = 1.0 #leave leakage population where it is\n", " return ret\n", "\n", "#Tack on a single extra \"0\" for the 5-th dimension corresponding\n", "# to the classical leakage level population.\n", "rho0 = np.concatenate( (mdl_2level_ideal.preps['rho0'],[[0]]), axis=0)\n", "E0 = np.concatenate( (mdl_2level_ideal.povms['Mdefault']['0'],[[0]]), axis=0)\n", "E1 = np.concatenate( (mdl_2level_ideal.povms['Mdefault']['1'],[[0]]), axis=0)\n", "\n", "\n", "sslbls = pygsti.obj.StateSpaceLabels([('Qubit',),('Leakage',)],[(2,),(1,)])\n", "mdl_2plus1_ideal = pygsti.obj.ExplicitOpModel(sslbls, 'gm')\n", "mdl_2plus1_ideal['rho0'] = rho0\n", "mdl_2plus1_ideal['Mdefault'] = pygsti.obj.UnconstrainedPOVM([('0',E0),('1',E1)])\n", "\n", "mdl_2plus1_ideal['Gi'] = to_2plus1_superop(mdl_2level_ideal['Gi'])\n", "mdl_2plus1_ideal['Gx'] = to_2plus1_superop(mdl_2level_ideal['Gi'])\n", "mdl_2plus1_ideal['Gy'] = to_2plus1_superop(mdl_2level_ideal['Gi'])" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--- Circuit Creation ---\n", " 305 sequences created\n", " Dataset has 305 entries: 305 utilized, 0 requested sequences were missing\n", "--- Iterative MLGST: Iter 1 of 1 305 operation sequences ---: \n", " --- Minimum Chi^2 GST ---\n", " Created evaluation tree with 1 subtrees. 
Will divide 1 procs into 1 (subtree-processing)\n", " groups of ~1 procs each, to distribute over 90 params (taken as 1 param groups of ~90 params).\n", " --- Outer Iter 0: norm_f = 1.24364e+09, mu=0, |J|=255630\n", " --- Outer Iter 1: norm_f = 118724, mu=2.46191e+06, |J|=4148.05\n", " --- Outer Iter 2: norm_f = 84982, mu=820638, |J|=3690.92\n", " --- Outer Iter 3: norm_f = 77529.7, mu=273546, |J|=3614.72\n", " --- Outer Iter 4: norm_f = 76388.2, mu=91182, |J|=3594.1\n", " --- Outer Iter 5: norm_f = 74758.6, mu=30394, |J|=3564.1\n", " --- Outer Iter 6: norm_f = 73202.6, mu=15306.2, |J|=3596.98\n", " --- Outer Iter 7: norm_f = 72412.3, mu=5102.08, |J|=3791.87\n", " --- Outer Iter 8: norm_f = 71574.8, mu=1700.69, |J|=4422.36\n", " --- Outer Iter 9: norm_f = 71115.3, mu=680.571, |J|=5558.64\n", " --- Outer Iter 10: norm_f = 70928.1, mu=226.857, |J|=6091.95\n", " --- Outer Iter 11: norm_f = 70903.7, mu=75.619, |J|=6573.42\n", " --- Outer Iter 12: norm_f = 70612.8, mu=50.4127, |J|=6710.9\n", " --- Outer Iter 13: norm_f = 69398.7, mu=1075.47, |J|=6299.2\n", " --- Outer Iter 14: norm_f = 68306.2, mu=779.583, |J|=4824.62\n", " --- Outer Iter 15: norm_f = 67823.2, mu=6261.97, |J|=4949.68\n", " --- Outer Iter 16: norm_f = 67490.5, mu=6697.25, |J|=4896.8\n", " --- Outer Iter 17: norm_f = 67162.3, mu=7139.91, |J|=4928.2\n", " --- Outer Iter 18: norm_f = 66605.7, mu=7153.15, |J|=5157.1\n", " --- Outer Iter 19: norm_f = 66243.5, mu=8473.51, |J|=5910.77\n", " --- Outer Iter 20: norm_f = 65658.5, mu=9095.27, |J|=6328.83\n", " --- Outer Iter 21: norm_f = 65030.6, mu=18792.6, |J|=6937.75\n", " --- Outer Iter 22: norm_f = 64421.3, mu=18767.7, |J|=7183.83\n", " --- Outer Iter 23: norm_f = 64070.1, mu=18763.1, |J|=7902.07\n", " --- Outer Iter 24: norm_f = 63719.8, mu=14475.1, |J|=8422.75\n", " --- Outer Iter 25: norm_f = 63366, mu=10494.3, |J|=9939.72\n", " --- Outer Iter 26: norm_f = 62945.6, mu=3498.1, |J|=11640.4\n", " --- Outer Iter 27: norm_f = 62352.3, mu=1424.59, |J|=12533.2\n", " --- Outer Iter 28: norm_f = 62220.6, mu=1.55603e+07, |J|=12923.5\n", " --- Outer Iter 29: norm_f = 62136.8, mu=1.29088e+07, |J|=18073.9\n", " --- Outer Iter 30: norm_f = 62104.9, mu=4.30293e+06, |J|=16315.5\n", " --- Outer Iter 31: norm_f = 62074.4, mu=1.43431e+06, |J|=16691.7\n", " --- Outer Iter 32: norm_f = 62050.8, mu=478103, |J|=15755.5\n", " --- Outer Iter 33: norm_f = 62027.9, mu=159368, |J|=16141.1\n", " --- Outer Iter 34: norm_f = 61976, mu=53122.6, |J|=16197.6\n", " --- Outer Iter 35: norm_f = 61841.2, mu=17707.5, |J|=16741.4\n", " --- Outer Iter 36: norm_f = 61524.5, mu=5902.51, |J|=17371.9\n", " --- Outer Iter 37: norm_f = 60847.7, mu=1967.5, |J|=18946.7\n", " --- Outer Iter 38: norm_f = 60510.2, mu=15760.6, |J|=12102.6\n", " --- Outer Iter 39: norm_f = 58570.2, mu=5.3796e+06, |J|=20510.2\n", " --- Outer Iter 40: norm_f = 58548.5, mu=7.05875e+06, |J|=97573.5\n", " --- Outer Iter 41: norm_f = 58400.8, mu=5.43615e+06, |J|=33072.7\n", " --- Outer Iter 42: norm_f = 58323.5, mu=1.81546e+06, |J|=24674.7\n", " --- Outer Iter 43: norm_f = 58135, mu=605155, |J|=25141.6\n", " --- Outer Iter 44: norm_f = 57594.7, mu=201718, |J|=21948.8\n", " --- Outer Iter 45: norm_f = 56254.3, mu=67239.4, |J|=17377.8\n", " --- Outer Iter 46: norm_f = 53890.6, mu=22413.1, |J|=17618.3\n", " --- Outer Iter 47: norm_f = 51060.5, mu=7471.05, |J|=25556.8\n", " --- Outer Iter 48: norm_f = 48553.9, mu=6814.6, |J|=18395.9\n", " --- Outer Iter 49: norm_f = 48451.7, mu=7.44336e+07, |J|=20679.7\n", " --- Outer Iter 50: norm_f = 48223.4, mu=2.48112e+07, 
|J|=20158\n", " --- Outer Iter 51: norm_f = 47908.8, mu=8.59843e+06, |J|=31843.3\n", " --- Outer Iter 52: norm_f = 47725, mu=2.86614e+06, |J|=24966.9\n", " --- Outer Iter 53: norm_f = 47520, mu=955382, |J|=26381.3\n", " --- Outer Iter 54: norm_f = 47319.8, mu=318461, |J|=25410.5\n", " --- Outer Iter 55: norm_f = 47164.4, mu=106154, |J|=24379.4\n", " --- Outer Iter 56: norm_f = 46907.9, mu=35384.5, |J|=23600\n", " --- Outer Iter 57: norm_f = 46304.8, mu=11794.8, |J|=22068.3\n", " --- Outer Iter 58: norm_f = 44969.6, mu=3931.61, |J|=24374.4\n", " --- Outer Iter 59: norm_f = 43343.9, mu=2621.07, |J|=52276.5\n", " --- Outer Iter 60: norm_f = 42469.2, mu=8445.74, |J|=89068.1\n", " --- Outer Iter 61: norm_f = 41220.4, mu=5630.49, |J|=47371.5\n", " --- Outer Iter 62: norm_f = 40610.6, mu=15014.6, |J|=30842\n", " --- Outer Iter 63: norm_f = 39770.6, mu=10009.8, |J|=31737.1\n", " --- Outer Iter 64: norm_f = 38576.5, mu=6673.18, |J|=135099\n", " --- Outer Iter 65: norm_f = 37882.1, mu=17795.1, |J|=59527\n", " --- Outer Iter 66: norm_f = 36924.8, mu=11863.4, |J|=36466.8\n", " --- Outer Iter 67: norm_f = 36395.6, mu=14042.5, |J|=58030.4\n", " --- Outer Iter 68: norm_f = 34909.2, mu=15422.7, |J|=41186.2\n", " --- Outer Iter 69: norm_f = 25883.2, mu=5140.9, |J|=8838.31\n", " --- Outer Iter 70: norm_f = 25821.8, mu=5.61524e+07, |J|=9415.83\n", " --- Outer Iter 71: norm_f = 25710.6, mu=1.87175e+07, |J|=11055.2\n", " --- Outer Iter 72: norm_f = 25611, mu=1.35695e+07, |J|=15154.1\n", " --- Outer Iter 73: norm_f = 25536.2, mu=7.55276e+06, |J|=12694.6\n", " --- Outer Iter 74: norm_f = 25474.4, mu=2.51759e+06, |J|=13694.4\n", " --- Outer Iter 75: norm_f = 25372.1, mu=839196, |J|=13384.8\n", " --- Outer Iter 76: norm_f = 25162.8, mu=279732, |J|=13840.3\n", " --- Outer Iter 77: norm_f = 24687.1, mu=93244, |J|=13200.4\n", " --- Outer Iter 78: norm_f = 23409.4, mu=31081.3, |J|=12234.2\n", " --- Outer Iter 79: norm_f = 19735.1, mu=10360.4, |J|=9825.76\n", " --- Outer Iter 80: norm_f = 14163.6, mu=6906.96, |J|=10453.7\n", " --- Outer Iter 81: norm_f = 13865.5, mu=442087, |J|=93836.1\n", " --- Outer Iter 82: norm_f = 13191.2, mu=222370, |J|=22834.9\n", " --- Outer Iter 83: norm_f = 13107, mu=320192, |J|=13272.2\n", " --- Outer Iter 84: norm_f = 12094.7, mu=106731, |J|=11792.7\n", " --- Outer Iter 85: norm_f = 11559.9, mu=106382, |J|=12223.7\n", " --- Outer Iter 86: norm_f = 10314, mu=35460.6, |J|=11506.8\n", " --- Outer Iter 87: norm_f = 9209.45, mu=33416.7, |J|=12317.4\n", " --- Outer Iter 88: norm_f = 7099.7, mu=11138.9, |J|=11237\n", " --- Outer Iter 89: norm_f = 4308.38, mu=3712.97, |J|=11886.4\n", " --- Outer Iter 90: norm_f = 3279.34, mu=9901.24, |J|=13309.3\n", " --- Outer Iter 91: norm_f = 2332.54, mu=6600.83, |J|=11630.8\n", " --- Outer Iter 92: norm_f = 1419.22, mu=2438.53, |J|=11854.7\n", " --- Outer Iter 93: norm_f = 996.859, mu=2229.78, |J|=12670.8\n", " --- Outer Iter 94: norm_f = 751.65, mu=912.108, |J|=10595.4\n", " --- Outer Iter 95: norm_f = 689.885, mu=304.036, |J|=10225.9\n", " --- Outer Iter 96: norm_f = 665.816, mu=304.653, |J|=12151.2\n", " --- Outer Iter 97: norm_f = 532.045, mu=170.053, |J|=12365.1\n", " --- Outer Iter 98: norm_f = 443.769, mu=324.131, |J|=12753.9\n", " --- Outer Iter 99: norm_f = 370.178, mu=136.528, |J|=10709.4\n", " --- Outer Iter 100: norm_f = 349.476, mu=60.9215, |J|=10246.4\n", " --- Outer Iter 101: norm_f = 343.463, mu=20.3072, |J|=10330.6\n", " --- Outer Iter 102: norm_f = 342.654, mu=6.76906, |J|=10401.6\n", " --- Outer Iter 103: norm_f = 342.501, mu=2.25635, 
|J|=10417.7\n", " --- Outer Iter 104: norm_f = 342.491, mu=0.752118, |J|=10420.3\n", " --- Outer Iter 105: norm_f = 342.49, mu=0.250706, |J|=10420.2\n", " Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 1e-06\n", " Sum of Chi^2 = 342.49 (305 data params - 65 model params = expected mean of 240; p-value = 1.52201e-05)\n", " Completed in 13.1s\n", " 2*Delta(log(L)) = 344.44\n", " Iteration 1 took 13.1s\n", " \n", " Switching to ML objective (last iteration)\n", " --- MLGST ---\n", " --- Outer Iter 0: norm_f = 172.22, mu=0, |J|=7358.03\n", " --- Outer Iter 1: norm_f = 171.499, mu=2396.3, |J|=7476.83\n", " --- Outer Iter 2: norm_f = 171.485, mu=798.768, |J|=7474.31\n", " --- Outer Iter 3: norm_f = 171.477, mu=266.256, |J|=7474.09\n", " --- Outer Iter 4: norm_f = 171.472, mu=88.752, |J|=7473.33\n", " --- Outer Iter 5: norm_f = 171.47, mu=29.584, |J|=7472.42\n", " --- Outer Iter 6: norm_f = 171.467, mu=9.86134, |J|=7471.75\n", " --- Outer Iter 7: norm_f = 171.465, mu=3.28711, |J|=7472.02\n", " --- Outer Iter 8: norm_f = 171.465, mu=1.0957, |J|=7472.82\n", " Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 1e-06\n", " Maximum log(L) = 171.465 below upper bound of -458095\n", " 2*Delta(log(L)) = 342.929 (305 data params - 65 model params = expected mean of 240; p-value = 1.41955e-05)\n", " Completed in 1.1s\n", " 2*Delta(log(L)) = 342.929\n", " Final MLGST took 1.1s\n", " \n", "Iterative MLGST Total Time: 14.2s\n", " -- Adding Gauge Optimized (go0) --\n", "--- Re-optimizing logl after robust data scaling ---\n", " --- MLGST ---\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/enielse/research/pyGSTi/packages/pygsti/algorithms/gaugeopt.py:240: UserWarning:\n", "\n", "No gauge group specified, so no gauge optimization performed.\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ " --- Outer Iter 0: norm_f = 171.465, mu=0, |J|=7472.82\n", " Least squares message = Both actual and predicted relative reductions in the sum of squares are at most 1e-06\n", " Maximum log(L) = 171.465 below upper bound of -458095\n", " 2*Delta(log(L)) = 342.929 (305 data params - 65 model params = expected mean of 240; p-value = 1.41955e-05)\n", " Completed in 0.1s\n", " -- Adding Gauge Optimized (go0) --\n" ] } ], "source": [ "results_2plus1 = pygsti.do_long_sequence_gst(ds, mdl_2plus1_ideal, prepfids, measfids,\n", " germs, maxLengths, verbosity=3,\n", " advancedOptions={\"starting point\": \"target\"})" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "*** Creating workspace ***\n", "*** Generating switchboard ***\n", "Found standard clifford compilation from std1Q_XYI\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/enielse/research/pyGSTi/packages/pygsti/report/factory.py:785: UserWarning:\n", "\n", "Idle tomography failed:\n", "Label{layers}\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "*** Generating tables ***\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/enielse/research/pyGSTi/packages/pygsti/extras/rb/theory.py:200: UserWarning:\n", "\n", "Output may be unreliable because the model is not approximately trace-preserving.\n", "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "*** Generating plots ***\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% 
significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "Statistical hypothesis tests did NOT find inconsistency between the datasets at 5.00% significance.\n", "*** Merging into template file ***\n", "Output written to example_files/leakage_report directory\n", "*** Report Generation Complete! Total time 43.5036s ***\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pygsti.report.create_standard_report({'two-level': results_2level, 'three-level': results_3level_leakage_basis,\n", " 'two+one level': results_2plus1},\n", " \"example_files/leakage_report\", \"Leakage Example Report\",\n", " advancedOptions={'autosize': 'none'})" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Open the report [here](example_files/leakage_report/main.html)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.0" } }, "nbformat": 4, "nbformat_minor": 2 }