import matplotlib.pyplot as plt
import numpy as np
import requests
from iminuit import Minuit
from iminuit.cost import LeastSquares
from chromo.kinematics import FixedTarget, GeV, Momentum
from chromo.constants import nucleon_mass
from jacobi import propagate
from IPython.display import display
from functools import cache
Here we estimate the extrapolation uncertainty of the pp inelastic cross-section at \(\sqrt{s} = 100\) TeV, before and after the LHC era.
We download the raw data from the PDG website. Statistical and systematic uncertainties are added in quadrature.
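Concretely, the combination computed in read() below is \( \sigma_{\text{err}} = \sqrt{\tfrac{1}{2}(e_{\text{stat}+}^2 + e_{\text{stat}-}^2) + \tfrac{1}{2}(e_{\text{sys}+}^2 + e_{\text{sys}-}^2)} \): the asymmetric plus/minus errors are averaged in quadrature, then the statistical and systematic parts are added in quadrature. (This assumes the four error columns in the PDG file are the plus/minus statistical and systematic uncertainties; the code simply sums the squares of items[5:9] with a factor 1/2.)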
@cache
def read(url):
    r = requests.get(url)
    tab = r.text
    xy = []
    skip = 11  # number of header lines to skip
    for line in tab.strip().split("\n"):
        if skip:
            skip -= 1
            continue
        items = line.split()
        xi = float(items[3])
        yi = float(items[4])
        yei = (0.5 * sum(float(i) ** 2 for i in items[5:9])) ** 0.5
        si = " ".join(items[9:11])
        if yei == 0.0:  # skip entries without uncertainties
            continue
        xy.append((xi, yi, yei, si))
    xy.sort(key=lambda p: p[0])
    return np.array(
        xy,
        dtype=np.dtype(
            [("plab", "float"), ("sig", "float"), ("sige", "float"), ("source", "U32")]
        ),
    )
= read("https://pdg.lbl.gov/2022/hadronic-xsections/rpp2022-pp_total.dat") data
# put air shower measurements into separate array
= ("HONDA 93", "ABREU 12", "ABBASI 15", "BALTRUSAITIS 84")
air_shower_measurements = np.all([data["source"] != source for source in air_shower_measurements], axis=0)
mask = data[mask]
accelerator = data[~mask] air_shower
To estimate the extrapolation uncertainty, we fit a COMPETE-like model to the data (C. Patrignani et al. (Particle Data Group), Chin. Phys. C 40 (2016) 100001). We use only the accelerator data. The covariance matrix of the fit is propagated into an uncertainty on the cross-section at \(\sqrt{s} = 100\) TeV.
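For reference, the functional form implemented in compete() below is \( \sigma(s) = H \ln^2(s/s_{abM}) + P_{ab} + R^1_{ab}\,(s/s_{abM})^{-\eta_1} + R^2_{ab}\,(s/s_{abM})^{-\eta_2} \), where the scale \( s_{abM} \) is built from the fitted mass parameter \( m \). The uncertainty band follows from linear error propagation: jacobi.propagate numerically estimates the Jacobian \( J \) of the model with respect to the parameters and evaluates \( \sigma^2_{\text{model}} = J\,C\,J^T \) with the fit covariance \( C \).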
The original data are given in the lab frame as the proton beam momentum. For convenience, we compute \(\sqrt{s}\) from the momentum with the kinematic tools in Chromo.
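This is standard fixed-target two-body kinematics; as a minimal cross-check of the conversion (the helper ecm_from_plab is ours for illustration, not part of Chromo):

# cross-check of the plab -> sqrt(s) conversion, assuming equal beam and
# target masses: sqrt(s) = sqrt(2*m^2 + 2*m*E_lab), E_lab = sqrt(plab^2 + m^2)
def ecm_from_plab(plab, m=nucleon_mass):
    elab = np.sqrt(plab**2 + m**2)
    return np.sqrt(2 * m**2 + 2 * m * elab)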
To simulate the pre-LHC era, we cut away all data points with \(\sqrt{s} > 3\) TeV.
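For orientation: with \( s \approx 2 m_p E_{\text{lab}} \) at high energies, the cut at \(\sqrt{s} = 3\) TeV corresponds to a lab momentum of about \( (3000\,\text{GeV})^2 / (2 \times 0.938\,\text{GeV}) \approx 4.8 \times 10^6 \) GeV.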
def compete(sqrts, h, m, pab, rab1, rab2, eta1, eta2):
    s = sqrts**2
    sabm = 2 * nucleon_mass + m
    x = s / sabm
    return h * np.log(x) ** 2 + pab + rab1 * x**-eta1 + rab2 * x**-eta2
sqrts = []
sig = []
sige = []
src = []

for plab, s, se, source in accelerator:
    if plab < 5:  # restrict the fit input to plab >= 5 GeV
        continue
    sqrts.append(FixedTarget(Momentum(plab * GeV), "p", "p").ecm)
    sig.append(s)
    sige.append(se)
    src.append(source)

sqrts = np.array(sqrts)
sig = np.array(sig)
sige = np.array(sige)
src = np.array(src)
cost = LeastSquares(sqrts, sig, sige, compete)
m = Minuit(cost, h=0.27, m=2.1, pab=34, rab1=13, rab2=7.4, eta1=0.451, eta2=0.549)
m.limits[:] = (0, None)
# some parameters need to be fixed, because they are not constrained by this data set
m.fixed["eta1", "eta2"] = True
for emax in (1e6, 3e3):
    # the mask selects which data points enter the fit
    cost.mask = sqrts < emax
    display(m.migrad(ncall=int(1e5)).fmin)
    val = m.values
    cov = m.covariance

    plt.figure()
    plt.errorbar(
        sqrts[cost.mask],
        sig[cost.mask],
        sige[cost.mask],
        fmt="o",
        label="PDG 2022 (accelerator)",
    )
    sqrts_air_shower = np.array(
        [FixedTarget(Momentum(plab * GeV), "p", "p").ecm for plab in air_shower["plab"]]
    )
    plt.errorbar(
        sqrts_air_shower,
        air_shower["sig"],
        air_shower["sige"],
        fmt="o",
        label="PDG 2022 (air shower)",
    )
    msqrts = np.geomspace(10, 1e5, 1000)
    # propagate the fit covariance into a model uncertainty band
    msig, cov_msig = propagate(lambda v: compete(msqrts, *v), val, cov)
    msige = np.diag(cov_msig) ** 0.5
    plt.plot(msqrts, msig, color="k", label="COMPETE-like model")
    plt.fill_between(msqrts, msig - msige, msig + msige, color="k", alpha=0.5)
    plt.semilogx()
    plt.xlabel(r"$\sqrt{s}$ / GeV")
    plt.ylabel("inelastic cross-section / mb")
    plt.legend(frameon=False, title="pp", loc="upper left")
    plt.ylim(0, 350)
    plt.annotate(
        f"uncertainty {msige[-1]:.0f} mb ({msige[-1] / msig[-1] * 100:.1f} %)",
        (1e5, 250),
        xytext=(1e5, 300),
        arrowprops={"facecolor": "k", "width": 1},
        ha="right",
    )

    if emax == 1e6:
        # inset with residuals of the high-energy accelerator data
        inax = plt.gca().inset_axes((0.12, 0.42, 0.3, 0.3))
        mask = 1e3 < sqrts
        sqrts_inset = sqrts[mask]
        src_inset = src[mask]
        x_inset = sqrts[mask]
        res_inset = sig[mask] - compete(sqrts_inset, *val)
        rese_inset = sige[mask]
        # sort all arrays according to publication year
        pub_year = [int(x.split()[1]) for x in src_inset]
        ind = np.argsort(pub_year)
        for x in (src_inset, sqrts_inset, x_inset, res_inset, rese_inset):
            x[:] = x[ind]
        inax.errorbar(src_inset, res_inset, rese_inset, fmt="o")
        inax.axhline(0, color="k")
        # inax.set_ylim(-5, 5)
        inax.set_ylabel("residual / mb")
        inax.set_yticks(np.linspace(-6, 6, 7))
        inax.tick_params("x", rotation=90)
        inax.tick_params(labelsize="small")

        # inset with pulls of the air shower data
        inax = plt.gca().inset_axes((0.55, 0.6, 0.3, 0.2))
        inax.errorbar(
            sqrts_air_shower,
            (air_shower["sig"] - compete(sqrts_air_shower, *val)) / air_shower["sige"],
            1,
            fmt="C1o",
        )
        inax.axhline(0, color="k")
        inax.set_ylim(-2, 2)
        inax.set_xscale("log")
        inax.set_xlabel(r"$\sqrt{s}$ / GeV")
        inax.set_ylabel("pull")

    plt.savefig(f"cross_section_extrapolation_uncertainty_{np.log10(emax):.0f}.pdf")
Migrad (first iteration, all data, emax = 1e6): FCN = 93.98 (χ²/ndof = 0.7), Nfcn = 1948, EDM = 0.000176 (Goal: 0.0002). Valid minimum, below EDM threshold (goal × 10), below call limit; SOME parameters at limit; Hesse ok, covariance accurate.

Migrad (second iteration, pre-LHC data only, emax = 3e3): FCN = 75.18 (χ²/ndof = 0.6), Nfcn = 2590, EDM = 0.000118 (Goal: 0.0002). Valid minimum, below EDM threshold (goal × 10), below call limit; SOME parameters at limit; Hesse ok, covariance accurate.
While air shower measurements individually cannot compete with the accuracy of the accelerator measurements, we find that their average pull is clearly reduced when the post-LHC data are included in the fit. This means that the air shower measurements are pulling the model in the right direction, despite their large systematic uncertainties.