Skip to content

Commit 889cad7

Browse files
authored
Merge pull request #1069 from Parallel-in-Time/bibtex-bibbot-1068-685b193
pint.bib updates
2 parents 685b193 + 0cb5630 commit 889cad7

1 file changed

Lines changed: 24 additions & 0 deletions

File tree

_bibliography/pint.bib

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8509,6 +8509,15 @@ @article{AluthgeEtAl2026
85098509
year = {2026},
85108510
}
85118511

8512+
@unpublished{AntonucciEtAl2026,
  abstract = {Time-parallel algorithms, such as Parareal, are well-understood for linear problems, but their convergence analysis for nonlinear, chaotic systems remains limited. This paper introduces a new theoretical framework for analysing time-decomposition methods as contraction mappings that converge in a finite number of iterations. We derive a finite-time guarantee linking the initial error, convergence rate, and iteration count, defined via a geometric outer--inner-ball condition. We apply this framework to Parareal, deriving explicit estimates for the convergence factor $\beta$ on nonlinear problems and showing it scales as $\mathcal{O}(h^2)$ when the macroscopic time grid is uniformly refined. Further, we address the failure of standard convergence criteria in chaotic regimes by introducing a proximity function. This chaos-aware criterion weighs solution discontinuities by the system's Lyapunov exponent (or the solver's Lipschitz constant), allowing the algorithm to converge to the correct statistical attractor without enforcing futile pointwise accuracy on divergent trajectories. Numerical experiments on the Logistic, Lorenz, and Lorenz-96 systems demonstrate that this approach decouples the iteration count from the total simulation time. By isolating the intrinsic mathematical bounds from hardware-dependent overheads, we establish that the method is strictly algorithmically scalable.},
  author = {Giancarlo Antonino Antonucci and Raphael Andreas Hauser and Debasmita Samaddar and James Buchanan},
  howpublished = {arXiv:2604.00855v1 [math.NA]},
  title = {Finite-Time Convergence Guarantees for Time-Parallel Methods},
  url = {https://arxiv.org/abs/2604.00855v1},
  year = {2026},
}
8520+
85128521
@unpublished{AraujoEtAl2026,
85138522
abstract = {Standard gradient-based iteration algorithms for optimization, such as gradient descent and its various proximal-based extensions to nonsmooth problems, are known to converge slowly for ill-conditioned problems, sometimes requiring many tens of thousands of iterations in practice. Since these iterations are computed sequentially, they may present a computational bottleneck in large-scale parallel simulations. In this work, we present a "parallel-in-iteration" framework that allows one to parallelize across these iterations using multiple processors with the objective of reducing the wall-clock time needed to solve the underlying optimization problem. Our methodology is based on re-purposing parallel time integration algorithms for time-dependent differential equations, motivated by the fact that optimization algorithms often have interpretations as discretizations of time-dependent differential equations (such as gradient flow). Specifically in this work, we use the parallel-in-time method of multigrid reduction-in-time (MGRIT), but note that our approach permits in principle the use of any other parallel-in-time method. We numerically demonstrate the efficacy of our approach on two different model problems, including a standard convex quadratic problem and the nonsmooth elastic obstacle problem in one and two spatial dimensions. For our model problems, we observe fast MGRIT convergence analogous to its prototypical performance on partial differential equations of diffusion type. Some theory is presented to connect the convergence of MGRIT to the convergence of the underlying optimization algorithm. Theoretically predicted parallel speedup results are also provided.},
85148523
author = {G. H. M. Araújo and O. A. Krzysik and H. De Sterck},
@@ -8650,6 +8659,21 @@ @unpublished{Jimenez-CigaEtAl2026
86508659
year = {2026},
86518660
}
86528661

8662+
@article{JuEtAl2026,
  author = {Ju, Guoliang and Xia, Xin and Zhou, Zeyuan and Gao, Yan and Gu, Huipeng and Tian, Jiake and Nie, Longfeng and Wang, Xucong and Xing, Wei},
  doi = {10.3390/math14071177},
  issn = {2227-7390},
  journal = {Mathematics},
  month = apr,
  number = {7},
  pages = {1177},
  publisher = {MDPI AG},
  title = {Pipelined Space-Time {Krylov} Method with Preconditioning: A {Parallel-in-Time} Algorithm for {Biot's} Quasi-Static Poroelasticity},
  url = {https://doi.org/10.3390/math14071177},
  volume = {14},
  year = {2026},
}
8676+
86538677
@unpublished{KuleshovEtAl2026,
86548678
abstract = {Probabilistic forecasting of irregularly sampled time series is crucial in domains such as healthcare and finance, yet it remains a formidable challenge. Existing Neural Controlled Differential Equation (Neural CDE) approaches, while effective at modelling continuous dynamics, suffer from slow, inherently sequential computation, which restricts scalability and limits access to global context. We introduce UFO (U-Former ODE), a novel architecture that seamlessly integrates the parallelizable, multiscale feature extraction of U-Nets, the powerful global modelling of Transformers, and the continuous-time dynamics of Neural CDEs. By constructing a fully causal, parallelizable model, UFO achieves a global receptive field while retaining strong sensitivity to local temporal dynamics. Extensive experiments on five standard benchmarks -- covering both regularly and irregularly sampled time series -- demonstrate that UFO consistently outperforms ten state-of-the-art neural baselines in predictive accuracy. Moreover, UFO delivers up to 15$\times$ faster inference compared to conventional Neural CDEs, with consistently strong performance on long and highly multivariate sequences.},
86558679
author = {Ilya Kuleshov and Alexander Marusov and Alexey Zaytsev},

0 commit comments

Comments
 (0)