_bibliography/pint.bib
@@ -8509,6 +8509,15 @@ @article{AluthgeEtAl2026
   year = {2026},
 }

+@unpublished{AntonucciEtAl2026,
+  abstract = {Time-parallel algorithms, such as Parareal, are well-understood for linear problems, but their convergence analysis for nonlinear, chaotic systems remains limited. This paper introduces a new theoretical framework for analysing time-decomposition methods as contraction mappings that converge in a finite number of iterations. We derive a finite-time guarantee linking the initial error, convergence rate, and iteration count, defined via a geometric outer--inner-ball condition. We apply this framework to Parareal, deriving explicit estimates for the convergence factor $\beta$ on nonlinear problems and showing it scales as $\mathcal{O}(h^2)$ when the macroscopic time grid is uniformly refined. Further, we address the failure of standard convergence criteria in chaotic regimes by introducing a proximity function. This chaos-aware criterion weighs solution discontinuities by the system's Lyapunov exponent (or the solver's Lipschitz constant), allowing the algorithm to converge to the correct statistical attractor without enforcing futile pointwise accuracy on divergent trajectories. Numerical experiments on the Logistic, Lorenz, and Lorenz-96 systems demonstrate that this approach decouples the iteration count from the total simulation time. By isolating the intrinsic mathematical bounds from hardware-dependent overheads, we establish that the method is strictly algorithmically scalable.},
+  author = {Giancarlo Antonino Antonucci and Raphael Andreas Hauser and Debasmita Samaddar and James Buchanan},
+  howpublished = {arXiv:2604.00855v1 [math.NA]},
+  title = {Finite-Time Convergence Guarantees for Time-Parallel Methods},
+  url = {https://arxiv.org/abs/2604.00855v1},
+  year = {2026},
+}
+
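A minimal Parareal sketch may help readers place the analysis above. Everything in it (logistic test problem, explicit-Euler coarse propagator, RK4 fine propagator, grid sizes, tolerance) is an assumption chosen for illustration, not the paper's setup; the `jump` quantity is the standard pointwise stopping test whose breakdown in chaotic regimes motivates the paper's proximity function.

```python
# Minimal Parareal sketch (NumPy only); illustrative assumptions throughout.
import numpy as np

def f(u):                      # logistic right-hand side u' = u(1 - u)
    return u * (1.0 - u)

def coarse(u, t0, t1):         # coarse propagator G: one explicit Euler step
    return u + (t1 - t0) * f(u)

def fine(u, t0, t1, m=100):    # fine propagator F: m RK4 substeps
    h = (t1 - t0) / m
    for _ in range(m):
        k1 = f(u); k2 = f(u + 0.5*h*k1); k3 = f(u + 0.5*h*k2); k4 = f(u + h*k3)
        u = u + h * (k1 + 2*k2 + 2*k3 + k4) / 6.0
    return u

T, N, u0 = 8.0, 16, 0.1
t = np.linspace(0.0, T, N + 1)

# Iteration 0: a serial coarse sweep gives the initial guess at interval ends.
U = np.empty(N + 1); U[0] = u0
for n in range(N):
    U[n + 1] = coarse(U[n], t[n], t[n + 1])

for k in range(1, N + 1):
    # The fine sweeps are independent: parallel across n in a real code.
    F_vals = np.array([fine(U[n], t[n], t[n + 1]) for n in range(N)])
    G_old  = np.array([coarse(U[n], t[n], t[n + 1]) for n in range(N)])
    U_new = U.copy()
    for n in range(N):         # serial coarse correction sweep
        U_new[n + 1] = coarse(U_new[n], t[n], t[n + 1]) + F_vals[n] - G_old[n]
    jump = np.max(np.abs(U_new - U))   # pointwise update size (stopping test)
    U = U_new
    if jump < 1e-10:
        break
print(f"converged after {k} iterations, max update {jump:.2e}")
```

Since the expensive fine sweeps are independent, each iteration's dominant cost parallelises across the N intervals; the serial content is the cheap coarse sweep, which is why the iteration count k is the quantity the paper's finite-time guarantee bounds.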

 @unpublished{AraujoEtAl2026,
   abstract = {Standard gradient-based iteration algorithms for optimization, such as gradient descent and its various proximal-based extensions to nonsmooth problems, are known to converge slowly for ill-conditioned problems, sometimes requiring many tens of thousands of iterations in practice. Since these iterations are computed sequentially, they may present a computational bottleneck in large-scale parallel simulations. In this work, we present a "parallel-in-iteration" framework that allows one to parallelize across these iterations using multiple processors with the objective of reducing the wall-clock time needed to solve the underlying optimization problem. Our methodology is based on re-purposing parallel time integration algorithms for time-dependent differential equations, motivated by the fact that optimization algorithms often have interpretations as discretizations of time-dependent differential equations (such as gradient flow). Specifically in this work, we use the parallel-in-time method of multigrid reduction-in-time (MGRIT), but note that our approach permits in principle the use of any other parallel-in-time method. We numerically demonstrate the efficacy of our approach on two different model problems, including a standard convex quadratic problem and the nonsmooth elastic obstacle problem in one and two spatial dimensions. For our model problems, we observe fast MGRIT convergence analogous to its prototypical performance on partial differential equations of diffusion type. Some theory is presented to connect the convergence of MGRIT to the convergence of the underlying optimization algorithm. Theoretically predicted parallel speedup results are also provided.},
   author = {G. H. M. Araújo and O. A. Krzysik and H. De Sterck},
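The "parallel-in-iteration" idea in the AraujoEtAl2026 abstract can be sketched on its convex quadratic model problem: gradient descent is forward Euler on the gradient flow $x' = -(Ax - b)$, so chunks of iterations can be treated like time intervals. The paper uses MGRIT; the loop below substitutes a Parareal-style two-level iteration (equivalent to two-level MGRIT with F-relaxation on linear problems), and the proximal coarse propagator, chunk sizes, and tolerance are all assumptions for illustration.

```python
# "Parallel-in-iteration" sketch on a convex quadratic f(x) = x'Ax/2 - b'x.
import numpy as np

d = 20
rng = np.random.default_rng(0)
Q = rng.standard_normal((d, d))
A = Q.T @ Q + np.eye(d)                 # SPD Hessian
b = rng.standard_normal(d)
h = 1.0 / np.linalg.eigvalsh(A)[-1]     # stable gradient-descent step size
m, N = 50, 16                           # GD iterations per chunk, chunks
tau = m * h                             # "time" covered by one chunk

def fine(x):                            # m explicit GD steps; parallel across chunks
    for _ in range(m):
        x = x - h * (A @ x - b)
    return x

def coarse(x):                          # one backward-Euler (proximal-point) step
    return np.linalg.solve(np.eye(d) + tau * A, x + tau * b)

X = [np.zeros(d)]
for n in range(N):                      # cheap serial coarse prediction
    X.append(coarse(X[n]))

for k in range(1, N + 1):
    F = [fine(X[n]) for n in range(N)]  # the expensive, independent sweeps
    Xn = [X[0]]
    for n in range(N):                  # serial coarse correction
        Xn.append(coarse(Xn[n]) + F[n] - coarse(X[n]))
    done = max(np.linalg.norm(u - v) for u, v in zip(Xn, X)) < 1e-9
    X = Xn
    if done:
        break

x_star = np.linalg.solve(A, b)          # exact minimiser for comparison
print(f"{k} outer iterations; error vs. exact minimiser:",
      np.linalg.norm(X[-1] - x_star))
```

The implicit coarse step is one deliberate design choice here: a single explicit step over a whole chunk would be unstable, whereas the proximal step is unconditionally stable, so the coarse sweep stays cheap without wrecking convergence.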
+  author = {Ju, Guoliang and Xia, Xin and Zhou, Zeyuan and Gao, Yan and Gu, Huipeng and Tian, Jiake and Nie, Longfeng and Wang, Xucong and Xing, Wei},
+  doi = {10.3390/math14071177},
+  issn = {2227-7390},
+  journal = {Mathematics},
+  month = {April},
+  number = {7},
+  pages = {1177},
+  publisher = {MDPI AG},
+  title = {Pipelined Space-Time Krylov Method with Preconditioning: A Parallel-in-Time Algorithm for Biot's Quasi-Static Poroelasticity},
+  url = {http://dx.doi.org/10.3390/math14071177},
+  volume = {14},
+  year = {2026},
+}
+
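The entry above describes a pipelined, preconditioned space-time Krylov solver for Biot's quasi-static poroelasticity. The generic all-at-once idea behind such methods, collecting every implicit time step into one block linear system and solving it with a single Krylov iteration, can be sketched on a 1-D heat equation; the model problem, backward-Euler discretisation, and plain unpreconditioned GMRES below are assumptions and omit the paper's pipelining and preconditioner entirely.

```python
# All-at-once space-time sketch: backward Euler for u_t = u_xx, all time
# steps stacked into one block-bidiagonal system solved by a single GMRES.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import gmres, spsolve

nx, nt, dt = 32, 16, 1e-3
dx = 1.0 / (nx + 1)
L = sp.diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(nx, nx)) / dx**2  # Dirichlet Laplacian
I = sp.identity(nx)
M = (I - dt * L).tocsc()               # backward Euler step: M u^{n+1} = u^n

# Space-time operator: M on the block diagonal, -I on the block subdiagonal.
A = sp.kron(sp.identity(nt), M) - sp.kron(sp.diags([1.0], [-1], shape=(nt, nt)), I)

xgrid = np.linspace(dx, 1.0 - dx, nx)
u0 = np.sin(np.pi * xgrid)             # initial condition
rhs = np.zeros(nx * nt)
rhs[:nx] = u0                          # only the first block row couples to u^0

u, info = gmres(A.tocsr(), rhs, restart=nx * nt)   # one Krylov solve for all steps
u_final = u[-nx:]                      # last time step from the space-time vector

useq = u0.copy()                       # ordinary sequential sweep for comparison
for _ in range(nt):
    useq = spsolve(M, useq)
print(info, np.max(np.abs(u_final - useq)))   # info 0 and a small difference
```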

 @unpublished{KuleshovEtAl2026,
   abstract = {Probabilistic forecasting of irregularly sampled time series is crucial in domains such as healthcare and finance, yet it remains a formidable challenge. Existing Neural Controlled Differential Equation (Neural CDE) approaches, while effective at modelling continuous dynamics, suffer from slow, inherently sequential computation, which restricts scalability and limits access to global context. We introduce UFO (U-Former ODE), a novel architecture that seamlessly integrates the parallelizable, multiscale feature extraction of U-Nets, the powerful global modelling of Transformers, and the continuous-time dynamics of Neural CDEs. By constructing a fully causal, parallelizable model, UFO achieves a global receptive field while retaining strong sensitivity to local temporal dynamics. Extensive experiments on five standard benchmarks -- covering both regularly and irregularly sampled time series -- demonstrate that UFO consistently outperforms ten state-of-the-art neural baselines in predictive accuracy. Moreover, UFO delivers up to 15$\times$ faster inference compared to conventional Neural CDEs, with consistently strong performance on long and highly multivariate sequences.},
   author = {Ilya Kuleshov and Alexander Marusov and Alexey Zaytsev},