[ { "assumptions": [ "Fixed quantum reservoir with classical linear readout.", "Comparisons control preprocessing/readout to isolate reservoir effect." ], "authors": [ "Unknown" ], "citation": "S01", "claims": [ "Entanglement can alter feature geometry and accuracy without necessarily implying asymptotic computational advantage." ], "conclusions": [ "Fair benchmarking must include simulability analysis and matched classical baselines." ], "contributions": [ "Separates empirical performance gain from complexity-theoretic advantage claims.", "Characterizes entanglement/simulability tradeoffs in QELM settings." ], "future_work": [ "Scale to larger systems and broader datasets with explicit simulability diagnostics." ], "key_equations": [ "f(x)=w^T z_Q(x), where z_Q(x) is the fixed quantum-reservoir embedding used by the linear readout." ], "limitations": [ "Task and architecture scope limits generalization to all QRC designs." ], "source_type": "paper", "summary": "Analyzes QELM performance versus entanglement structure and classical simulability, framing when improved task performance should or should not be interpreted as computational quantum advantage.", "title": "Entanglement and Classical Simulability in Quantum Extreme Learning Machines", "url": "https://arxiv.org/abs/2509.06873", "year": 2025 }, { "assumptions": [ "Readout training is classical and linear.", "Feature compression (PCA/AE) is applied before quantum embedding." ], "authors": [ "A. De Lorenzis", "M. P. Casado", "M. P. Estarellas", "N. Lo Gullo", "T. Lux", "F. Plastina", "A. Riera", "J. Settino" ], "citation": "S02", "claims": [ "Quantum reservoir feature maps can improve classifier accuracy relative to non-quantum baselines under tested settings." ], "conclusions": [ "Encoding and measurement choices are first-order factors for QELM effectiveness." 
], "contributions": [ "End-to-end ablations over preprocessing, encoding, and reservoir dynamics for image tasks.", "Empirical evidence that encoding choice materially changes discrimination quality." ], "future_work": [ "Harder datasets and stricter classical-simulability comparisons." ], "key_equations": [ "y=argmax_c (W z_Q(x))_c with fixed quantum feature map z_Q and trained linear readout W." ], "limitations": [ "Dataset/task scope and simulator/hardware constraints may limit external validity." ], "source_type": "paper", "summary": "Systematically studies QELM image classification with PCA/autoencoder preprocessing, multiple encodings, and Hamiltonian choices, highlighting encoding-basis alignment effects.", "title": "Harnessing Quantum Extreme Learning Machines for image classification", "url": "https://arxiv.org/abs/2409.00998", "year": 2024 }, { "assumptions": [ "Reservoir dynamics fixed; optimization happens in measurement/readout.", "Kernel ridge assumptions for regularized regression." ], "authors": [ "Markus Gross", "Hans-Martin Rieser" ], "citation": "S03", "claims": [ "Optimized observables improve performance and can reduce training burden at larger qubit counts." ], "conclusions": [ "Measurement-operator design is a central leverage point in fixed-reservoir models." ], "contributions": [ "Bridges QRC observable selection with kernel methods.", "Provides practical decomposition strategies for hardware-constrained observables." ], "future_work": [ "Hardware experiments with constrained measurement sets and noise-aware optimization." ], "key_equations": [ "\\hat{f}(x)=k(x,X)(K+\u03bb I)^{-1}y", "\\hat{O}=\\arg\\min_O \\min_{\u03b2} ||y-\u03a6_O \u03b2||_2^2+\u03bb||\u03b2||_2^2" ], "limitations": [ "Optimization quality depends on task-specific training data and model assumptions." 
], "source_type": "paper", "summary": "Formulates QRC/QELM readout and observable design in a kernel-ridge framework to derive measurement operators minimizing prediction error for fixed reservoirs.", "title": "Kernel-based optimization of measurement operators for quantum reservoir computers", "url": "https://arxiv.org/abs/2602.14677", "year": 2026 }, { "assumptions": [ "Parametrized circuits with repeated data-encoding gates.", "Expressivity analyzed through accessible Fourier spectrum." ], "authors": [ "Maria Schuld", "Ryan Sweke", "Johannes Jakob Meyer" ], "citation": "S04", "claims": [ "Encoding, not only trainable ansatz depth, governs representational power." ], "conclusions": [ "Designing encoding maps is foundational for quantum ML performance." ], "contributions": [ "Formal link between encoding strategy and function class capacity.", "Conditions under which models become universal approximators." ], "future_work": [ "Task-specific encoding construction and capacity-generalization tradeoff studies." ], "key_equations": [ "f_\u03b8(x)=\u2211_{\u03c9\u2208\u03a9} c_\u03c9(\u03b8)e^{i\u03c9x}" ], "limitations": [ "Theoretical results do not by themselves guarantee empirical advantage." ], "source_type": "paper", "summary": "Shows that encoded quantum models admit Fourier decompositions where accessible frequencies are determined by encoding gates, clarifying expressivity limits and design.", "title": "The effect of data encoding on the expressive power of variational quantum machine learning models", "url": "https://arxiv.org/abs/2008.08605", "year": 2021 }, { "assumptions": [ "Quantum states define implicit feature maps.", "Learning performance tied to induced kernel geometry." ], "authors": [ "Maria Schuld" ], "citation": "S05", "claims": [ "Kernel training can match or outperform variational training for many supervised settings." ], "conclusions": [ "Quantum advantage claims require kernel-centric classical comparisons." 
], "contributions": [ "Unifying kernel perspective for near-term supervised QML.", "Provides comparison lens against classical kernel baselines." ], "future_work": [ "Characterize datasets where induced quantum kernels provide robust gains." ], "key_equations": [ "k(x,x')=|\u27e8\u03c6(x)|\u03c6(x')\u27e9|^2" ], "limitations": [ "Does not prove universal practical superiority of quantum kernels." ], "source_type": "paper", "summary": "Establishes that many supervised quantum models can be recast as kernel methods, emphasizing data encoding as the central modeling ingredient.", "title": "Supervised quantum machine learning models are kernel methods", "url": "https://arxiv.org/abs/2101.11020", "year": 2021 }, { "assumptions": [ "Analog Rydberg dynamics used as fixed reservoir.", "Classical preprocessing/readout around quantum embedding." ], "authors": [ "Milan Kornjaca", "Hong-Ye Hu", "Chen Zhao", "et al." ], "citation": "S06", "claims": [ "QRC performance and utility can improve with system size in tested settings." ], "conclusions": [ "Analog neutral-atom platforms are viable for large-scale reservoir learning studies." ], "contributions": [ "Demonstrates scalable, gradient-free QRC workflow on public hardware.", "Reports comparative quantum-kernel-style utility on constructed tasks." ], "future_work": [ "Extend to harder real-world datasets and stronger classical simulability controls." ], "key_equations": [ "H(t)=\u03a9(t)/2 \u2211_j (|g_j\u27e9\u27e8r_j|+|r_j\u27e9\u27e8g_j|)+\u2211_{j