From f8f0cdc02105c47b3c6e343dd1e9b37920f29cc4 Mon Sep 17 00:00:00 2001 From: Jonas Eschle Date: Wed, 10 Apr 2024 17:14:16 -0400 Subject: [PATCH] enh: update to zfit 0.20 --- TensorFlow/HPC_with_TensorFlow.ipynb | 11 +++ _unused/Summary.ipynb | 3 + _unused/kstmumu_tutorial.py | 6 +- .../TensorFlow/HPC_with_TensorFlow.ipynb | 11 +++ .../05 - Exploring the FitResult.ipynb | 10 +-- .../components/20 - Composite Models.ipynb | 4 +- .../components/30 - Binned models.ipynb | 6 +- .../components/33 - Binned fits.ipynb | 11 ++- .../50 - Custom code and run mode.ipynb | 10 ++- .../components/60 - Custom PDF.ipynb | 34 +++---- .../components/62 - Multidim Custom PDF.ipynb | 60 +++++-------- .../tutorials/components/80 - Toy Study.ipynb | 12 +-- .../90 - Serialization basics.ipynb | 13 ++- ...nts_simultaneous_fit_discovery_splot.ipynb | 90 ++++++++++--------- _website/tutorials/guides/custom_models.ipynb | 73 ++++++++------- .../tutorials/introduction/Introduction.ipynb | 35 +++++--- .../introduction/Introduction_long.ipynb | 2 +- .../tutorials/introduction/Quickstart.ipynb | 19 ++-- components/05 - Exploring the FitResult.ipynb | 10 +-- components/20 - Composite Models.ipynb | 4 +- components/30 - Binned models.ipynb | 6 +- components/33 - Binned fits.ipynb | 11 ++- .../50 - Custom code and run mode.ipynb | 10 ++- components/60 - Custom PDF.ipynb | 34 +++---- components/62 - Multidim Custom PDF.ipynb | 60 +++++-------- components/80 - Toy Study.ipynb | 12 +-- components/90 - Serialization basics.ipynb | 13 ++- ...nts_simultaneous_fit_discovery_splot.ipynb | 90 ++++++++++--------- guides/custom_models.ipynb | 73 ++++++++------- introduction/Introduction.ipynb | 35 +++++--- introduction/Introduction_long.ipynb | 2 +- introduction/Quickstart.ipynb | 19 ++-- 32 files changed, 408 insertions(+), 381 deletions(-) diff --git a/TensorFlow/HPC_with_TensorFlow.ipynb b/TensorFlow/HPC_with_TensorFlow.ipynb index 1adf1f7..b20a363 100644 --- a/TensorFlow/HPC_with_TensorFlow.ipynb +++ b/TensorFlow/HPC_with_TensorFlow.ipynb @@ -1527,6 +1527,7 @@ " x2 = np.mean(logx)\n", " return x + x1 + x2\n", "\n", + "\n", "calc_np_numba = calc_np # numba.jit(nopython=True, parallel=True)(calc_np)" ] }, @@ -1570,6 +1571,7 @@ "\n", " return x + x1 + x2\n", "\n", + "\n", "calc_tf_func = tf.function(calc_tf, autograph=False)" ] }, @@ -1698,9 +1700,12 @@ " sum_init += x\n", " return sum_init\n", "\n", + "\n", "calc_tf_func2 = tf.function(calc_tf2, autograph=False)\n", "\n", "# @numba.njit(parallel=True) # njit is equal to jit(nopython=True), meaning \"compile everything or raise error\"\n", + "\n", + "\n", "def calc_numba2(x, n):\n", " sum_init = np.zeros_like(x)\n", " for i in range(1, n + 1):\n", @@ -1882,9 +1887,11 @@ "def true_fn():\n", " return 1.\n", "\n", + "\n", "def false_fn():\n", " return 0.\n", "\n", + "\n", "value = tf.cond(tf.greater(111., 42.), true_fn=true_fn, false_fn=false_fn)" ] }, @@ -1927,9 +1934,11 @@ "def cond(x, y):\n", " return x > y\n", "\n", + "\n", "def body(x, y):\n", " return x / 2, y + 1\n", "\n", + "\n", "x, y = tf.while_loop(cond=cond,\n", " body=body,\n", " loop_vars=[100., 1.])" @@ -2013,6 +2022,7 @@ "def do_map(func, tensor):\n", " return tf.map_fn(func, tensor)\n", "\n", + "\n", "do_map(tf.math.sin, rnd1_big)" ] }, @@ -2030,6 +2040,7 @@ "def do_map_vec(func, tensor):\n", " return tf.vectorized_map(func, tensor)\n", "\n", + "\n", "do_map_vec(tf.math.sin, rnd1_big)" ] }, diff --git a/_unused/Summary.ipynb b/_unused/Summary.ipynb index 4713f4a..f9184b5 100644 --- a/_unused/Summary.ipynb +++ 
b/_unused/Summary.ipynb @@ -252,9 +252,12 @@ "source": [ "# This shortcut function will be available in zfit, but here\n", "# we use the core one to highlight the use of tensorflow graphs\n", + "\n", + "\n", "def api_unbinned_nll(pdf, data, norm_range):\n", " return zfit.core.loss.unbinned_nll(pdf.prob(data, norm_range=norm_range))\n", "\n", + "\n", "mu1 = zfit.Parameter(\"mu\", 5.0, 0., 10)\n", "sigma1 = zfit.Parameter(\"sigma\", 1, 0.1, 5.)\n", "gauss1 = zfit.pdf.Gauss(mu=mu1, sigma=sigma1)\n", diff --git a/_unused/kstmumu_tutorial.py b/_unused/kstmumu_tutorial.py index 4103fe8..fb88501 100755 --- a/_unused/kstmumu_tutorial.py +++ b/_unused/kstmumu_tutorial.py @@ -256,9 +256,9 @@ def plot_pdf_data(data, model, title, n_bins=40): histtype="step", ) # plot the pdfs - y = model.pdf(x).numpy() - y_sig = (model.pdfs[0].pdf(x) * model.fracs[0]).numpy() # notice the frac! - y_bkg = (model.pdfs[1].pdf(x) * model.fracs[1]).numpy() # notice the frac! + y = model.pdf(x) + y_sig = model.pdfs[0].pdf(x) * model.fracs[0] # notice the frac! + y_bkg = model.pdfs[1].pdf(x) * model.fracs[1] # notice the frac! plt.plot(x, y * plot_scaling, label="Sum - Model", linewidth=linewidth * 2) plt.plot( diff --git a/_website/tutorials/TensorFlow/HPC_with_TensorFlow.ipynb b/_website/tutorials/TensorFlow/HPC_with_TensorFlow.ipynb index 1adf1f7..b20a363 100644 --- a/_website/tutorials/TensorFlow/HPC_with_TensorFlow.ipynb +++ b/_website/tutorials/TensorFlow/HPC_with_TensorFlow.ipynb @@ -1527,6 +1527,7 @@ " x2 = np.mean(logx)\n", " return x + x1 + x2\n", "\n", + "\n", "calc_np_numba = calc_np # numba.jit(nopython=True, parallel=True)(calc_np)" ] }, @@ -1570,6 +1571,7 @@ "\n", " return x + x1 + x2\n", "\n", + "\n", "calc_tf_func = tf.function(calc_tf, autograph=False)" ] }, @@ -1698,9 +1700,12 @@ " sum_init += x\n", " return sum_init\n", "\n", + "\n", "calc_tf_func2 = tf.function(calc_tf2, autograph=False)\n", "\n", "# @numba.njit(parallel=True) # njit is equal to jit(nopython=True), meaning \"compile everything or raise error\"\n", + "\n", + "\n", "def calc_numba2(x, n):\n", " sum_init = np.zeros_like(x)\n", " for i in range(1, n + 1):\n", @@ -1882,9 +1887,11 @@ "def true_fn():\n", " return 1.\n", "\n", + "\n", "def false_fn():\n", " return 0.\n", "\n", + "\n", "value = tf.cond(tf.greater(111., 42.), true_fn=true_fn, false_fn=false_fn)" ] }, @@ -1927,9 +1934,11 @@ "def cond(x, y):\n", " return x > y\n", "\n", + "\n", "def body(x, y):\n", " return x / 2, y + 1\n", "\n", + "\n", "x, y = tf.while_loop(cond=cond,\n", " body=body,\n", " loop_vars=[100., 1.])" @@ -2013,6 +2022,7 @@ "def do_map(func, tensor):\n", " return tf.map_fn(func, tensor)\n", "\n", + "\n", "do_map(tf.math.sin, rnd1_big)" ] }, @@ -2030,6 +2040,7 @@ "def do_map_vec(func, tensor):\n", " return tf.vectorized_map(func, tensor)\n", "\n", + "\n", "do_map_vec(tf.math.sin, rnd1_big)" ] }, diff --git a/_website/tutorials/components/05 - Exploring the FitResult.ipynb b/_website/tutorials/components/05 - Exploring the FitResult.ipynb index 96b67e7..9f5fc5c 100644 --- a/_website/tutorials/components/05 - Exploring the FitResult.ipynb +++ b/_website/tutorials/components/05 - Exploring the FitResult.ipynb @@ -35,15 +35,11 @@ "metadata": {}, "outputs": [], "source": [ - "obs = zfit.Space('x', limits=(0, 10))\n", + "obs = zfit.Space('x', 0, 10)\n", "mu = zfit.Parameter('mu', 5, 0, 10)\n", "sigma = zfit.Parameter('sigma', 1, 0, 10)\n", "nsig = zfit.Parameter('nsig', 1000, 0, 10000)\n", - "gauss_nonext = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=sigma,\n", - " # extended=nsig # 
requires zfit>=0.13\n", - " )\n", - "gauss = gauss_nonext.create_extended(nsig)\n", - "\n", + "gauss = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=sigma, extended=nsig)\n", "data = gauss.sample()\n", "print(f\"The sampled data (poisson fluctuated) has {data.nevents} events.\")" ] @@ -319,7 +315,7 @@ "outputs": [], "source": [ "weighted_data = zfit.Data.from_tensor(obs=obs, tensor=data.value(), weights=znp.random.uniform(0.1, 5, size=(data.nevents,)))\n", - "weighted_nll = zfit.loss.UnbinnedNLL(model=gauss_nonext, data=weighted_data)\n", + "weighted_nll = zfit.loss.UnbinnedNLL(model=gauss, data=weighted_data)\n", "weighted_result = minimizer.minimize(weighted_nll)" ] }, diff --git a/_website/tutorials/components/20 - Composite Models.ipynb b/_website/tutorials/components/20 - Composite Models.ipynb index 761f3d5..f972cea 100644 --- a/_website/tutorials/components/20 - Composite Models.ipynb +++ b/_website/tutorials/components/20 - Composite Models.ipynb @@ -33,7 +33,7 @@ "source": [ "frac = zfit.Parameter(\"frac_gauss\", 0.5, 0, 1)\n", "\n", - "obs1 = zfit.Space('obs1', limits=(-5, 5))\n", + "obs1 = zfit.Space('obs1', -5, 5)\n", "\n", "mu1 = zfit.Parameter(\"mu1\", 1.)\n", "sigma1 = zfit.Parameter(\"sigma1\", 1.)\n", @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "obs2 = zfit.Space('obs2', limits=(-3, 7))\n", + "obs2 = zfit.Space('obs2', -3, 7)\n", "mu3 = zfit.Parameter(\"mu3\", 1.)\n", "sigma3 = zfit.Parameter(\"sigma3\", 1.)\n", "gauss3 = zfit.pdf.Gauss(obs=obs2, mu=mu3, sigma=sigma3) # different obs than above." diff --git a/_website/tutorials/components/30 - Binned models.ipynb b/_website/tutorials/components/30 - Binned models.ipynb index 26feeea..1c88ee3 100644 --- a/_website/tutorials/components/30 - Binned models.ipynb +++ b/_website/tutorials/components/30 - Binned models.ipynb @@ -52,7 +52,7 @@ "\n", "normal_np = np.random.normal(loc=2., scale=3., size=10000)\n", "\n", - "obs = zfit.Space(\"x\", limits=(-10, 10))\n", + "obs = zfit.Space(\"x\", -10, 10)\n", "\n", "mu = zfit.Parameter(\"mu\", 1., -4, 6)\n", "sigma = zfit.Parameter(\"sigma\", 1., 0.1, 10)\n", @@ -81,7 +81,7 @@ "obs_bin = zfit.Space(\"x\", binning=binning)\n", "\n", "data = data_nobin.to_binned(obs_bin)\n", - "model = zfit.pdf.BinnedFromUnbinnedPDF(model_nobin, obs_bin)\n", + "model = model_nobin.to_binned(obs_bin)\n", "loss = zfit.loss.BinnedNLL(model, data)" ] }, @@ -160,7 +160,7 @@ "\n", "plt.figure()\n", "mplhep.histplot(model_hist, density=1, label=\"model\")\n", - "mplhep.histplot(data.to_hist(), density=1, label=\"data\")\n", + "mplhep.histplot(data, density=1, label=\"data\")\n", "plt.legend()\n", "plt.title(\"After fit\")" ] diff --git a/_website/tutorials/components/33 - Binned fits.ipynb b/_website/tutorials/components/33 - Binned fits.ipynb index ff9a8dc..2f7976a 100644 --- a/_website/tutorials/components/33 - Binned fits.ipynb +++ b/_website/tutorials/components/33 - Binned fits.ipynb @@ -64,7 +64,7 @@ "source": [ "normal_np = np.random.normal(loc=2., scale=1.3, size=10000)\n", "\n", - "obs = zfit.Space(\"x\", limits=(-10, 10))\n", + "obs = zfit.Space(\"x\", -10, 10)\n", "\n", "mu = zfit.Parameter(\"mu\", 1., -4, 6)\n", "sigma = zfit.Parameter(\"sigma\", 1., 0.1, 10)\n", @@ -487,8 +487,7 @@ "metadata": {}, "outputs": [], "source": [ - "bkg_hist = zfit.Data.from_numpy(obs=obs, array=np.random.exponential(scale=20, size=100_000) - 10).to_binned(\n", - " obs_binned)\n", + "bkg_hist = zfit.Data(np.random.exponential(scale=20, size=100_000) - 10, obs=obs_binned)\n", "bkg_hist_m1 =
zfit.Data.from_numpy(obs=obs,\n", " array=np.random.exponential(scale=35, size=100_000) - 10).to_binned(\n", " obs_binned)\n", @@ -571,8 +570,8 @@ "outputs": [], "source": [ "modifier_constraints = zfit.constraint.GaussianConstraint(params=list(modifiers.values()), observation=np.ones(len(modifiers)),\n", - " uncertainty=np.ones(len(modifiers)))\n", - "# alpha_constraint = zfit.constraint.GaussianConstraint(alpha, 0, 1)" + " uncertainty=0.1 * np.ones(len(modifiers)))\n", + "alpha_constraint = zfit.constraint.GaussianConstraint(alpha, 0, 1)" ] }, { @@ -581,7 +580,7 @@ "metadata": {}, "outputs": [], "source": [ - "loss_binned = zfit.loss.ExtendedBinnedNLL(model, data, constraints=modifier_constraints)" + "loss_binned = zfit.loss.ExtendedBinnedNLL(model, data, constraints=[modifier_constraints, alpha_constraint])" ] }, { diff --git a/_website/tutorials/components/50 - Custom code and run mode.ipynb b/_website/tutorials/components/50 - Custom code and run mode.ipynb index 1a4c5d3..0a35160 100644 --- a/_website/tutorials/components/50 - Custom code and run mode.ipynb +++ b/_website/tutorials/components/50 - Custom code and run mode.ipynb @@ -51,6 +51,7 @@ "import numpy as np\n", "import tensorflow as tf\n", "import zfit\n", + "import zfit.z.numpy as znp # this is numpy-like\n", "from zfit import z # this is basically tf, just wrapped" ] }, @@ -166,7 +167,7 @@ "metadata": {}, "outputs": [], "source": [ - "graph_func(z.constant(5))" + "graph_func(znp.array(5))" ] }, { @@ -175,7 +176,7 @@ "metadata": {}, "outputs": [], "source": [ - "graph_func(z.constant(7))" + "graph_func(znp.array(7))" ] }, { @@ -210,7 +211,7 @@ "outputs": [], "source": [ "try:\n", - " graph_func_fail(z.constant(5.))\n", + " graph_func_fail(znp.array(5.))\n", "except NotImplementedError as error:\n", " print(f\"Error was raised, last line: {error}\")" ] @@ -262,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "graph_func_fail(z.constant(5))" + "graph_func_fail(znp.array(5))" ] }, { @@ -299,6 +300,7 @@ "def numpy_func(x, a):\n", " return np.square(x) * a\n", "\n", + "\n", "@z.function\n", "def wrapped_numpy_func(x_tensor, a_tensor):\n", " result = z.py_function(func=numpy_func, inp=[x_tensor, a_tensor], Tout=zfit.ztypes.float) # or tf.float64\n", diff --git a/_website/tutorials/components/60 - Custom PDF.ipynb b/_website/tutorials/components/60 - Custom PDF.ipynb index 8c5c051..9899c83 100644 --- a/_website/tutorials/components/60 - Custom PDF.ipynb +++ b/_website/tutorials/components/60 - Custom PDF.ipynb @@ -44,11 +44,12 @@ " _N_OBS = 1 # dimension, can be omitted\n", " _PARAMS = ['mean', 'std'] # the name of the parameters\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " x = z.unstack_x(x) # returns a list with the columns: do x, y, z = z.unstack_x(x) for 3D\n", - " mean = self.params['mean']\n", - " std = self.params['std']\n", - " return z.exp(- ((x - mean) / std) ** 2)" + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " x0 = x[0] # using the 0th axis\n", + " mean = params['mean']\n", + " std = params['std']\n", + " return z.exp(- ((x0 - mean) / std) ** 2)" ] }, { @@ -70,18 +71,19 @@ "source": [ "class MyGauss(zfit.pdf.BasePDF):\n", "\n", - " def __init__(self, mean, std, obs, extended=None, norm=None, name=None):\n", + " def __init__(self, mean, std, obs, extended=None, norm=None, name=None, label=None):\n", " params = {'mean': mean, # 'mean' is the name as it will be named in the PDF, mean is just the parameter to create the PDF\n", " 'std': std\n", " }\n", " 
super().__init__(obs=obs, params=params, extended=extended, norm=norm,\n", - " name=name)\n", + " name=name, label=label)\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " x = z.unstack_x(x)\n", - " mean = self.params['mean']\n", - " std = self.params['std']\n", - " return z.exp(- ((x - mean) / std) ** 2)" + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " x0 = x[0] # using the 0th axis\n", + " mean = params['mean']\n", + " std = params['std']\n", + " return z.exp(- ((x0 - mean) / std) ** 2)" ] }, { @@ -90,10 +92,10 @@ "metadata": {}, "outputs": [], "source": [ - "obs = zfit.Space('obs1', limits=(-3, 6))\n", + "obs = zfit.Space('obs1', -3, 6)\n", "\n", "data_np = np.random.random(size=1000)\n", - "data = zfit.data.Data.from_numpy(array=data_np, obs=obs)" + "data = zfit.Data(data_np, obs=obs)" ] }, { @@ -153,7 +155,7 @@ "outputs": [], "source": [ "def gauss_integral_from_any_to_any(limits, params, model):\n", - " lower, upper = limits.limit1d\n", + " lower, upper = limits.v1.limits\n", " mean = params['mean']\n", " std = params['std']\n", " # write your integral here\n", @@ -166,7 +168,7 @@ "metadata": {}, "outputs": [], "source": [ - "limits = zfit.Space(axes=0, limits=(zfit.Space.ANY_LOWER, zfit.Space.ANY_UPPER))\n", + "limits = zfit.Space(axes=0, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", "MyGauss.register_analytic_integral(func=gauss_integral_from_any_to_any, limits=limits)" ] }, diff --git a/_website/tutorials/components/62 - Multidim Custom PDF.ipynb b/_website/tutorials/components/62 - Multidim Custom PDF.ipynb index 4aa67ed..f3c76a0 100644 --- a/_website/tutorials/components/62 - Multidim Custom PDF.ipynb +++ b/_website/tutorials/components/62 - Multidim Custom PDF.ipynb @@ -23,6 +23,7 @@ "source": [ "import numpy as np\n", "import zfit\n", + "import zfit.z.numpy as znp\n", "from zfit import z" ] }, @@ -47,13 +48,18 @@ " _N_OBS = 3 # dimension, can be omitted\n", " _PARAMS = ['xshift', 'yshift'] # the name of the parameters\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " x1, x2, x3 = x.unstack_x() # returns a list with the columns: do x1, x2, x3 = z.unstack_x(x) for 3D\n", - " xshift = self.params['xshift']\n", - " yshift = self.params['yshift']\n", - " x1 = x1 + xshift\n", - " x2 = x2 + yshift\n", - " return z.sqrt(z.square(x1) + z.square(x2) + z.square(x3)) # dummy calculations" + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " x0 = x[0]\n", + " x1 = x[1]\n", + " x2 = x[2]\n", + " # alternatively, we could use the following line to get the same result\n", + " # x0, x1, x2 = z.unstack_x(x) # returns a list with the columns: do x1, x2, x3 = z.unstack_x(x) for 3D\n", + " xshift = params['xshift']\n", + " yshift = params['yshift']\n", + " x0 = x0 + xshift\n", + " x1 = x1 + yshift\n", + " return znp.sqrt(znp.square(x0) + x1 ** 2 + znp.power(x2, 2)) # dummy calculations, all are equivalent" ] }, { @@ -102,17 +108,8 @@ "metadata": {}, "outputs": [], "source": [ - "probs = abs_vector.pdf(data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "probs_np = zfit.run(probs)\n", - "print(probs_np[:20])" + "probs = abs_vector.pdf(data)\n", + "print(probs[:20])" ] }, { @@ -139,7 +136,7 @@ "outputs": [], "source": [ "def abs_vector_integral_from_any_to_any(limits, params, model):\n", - " lower, upper = limits.limits\n", + " lower, upper = limits.v1.limits\n", " # write your integral here\n", " return 42. # dummy integral, must be a scalar!" 
] @@ -157,24 +154,11 @@ "metadata": {}, "outputs": [], "source": [ - "limits_to_integrate = (((zfit.Space.ANY_LOWER, zfit.Space.ANY_LOWER, zfit.Space.ANY_LOWER),),\n", - " ((zfit.Space.ANY_UPPER,zfit.Space.ANY_UPPER,zfit.Space.ANY_UPPER),))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we need the axis we will integrate over" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "axes_to_integrate = (0, 1, 2) # implies this is over all axes of the pdf" + "limit0 = zfit.Space(axes=0, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", + "limit1 = zfit.Space(axes=1, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", + "limit2 = zfit.Space(axes=2, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", + "limits = limit0 * limit1 * limit2 # creates the 3D limits\n", + "print(limits)" ] }, { @@ -190,8 +174,6 @@ "metadata": {}, "outputs": [], "source": [ - "limits = zfit.Space(axes=axes_to_integrate, limits=limits_to_integrate)\n", - "\n", "AbsVectorShifted.register_analytic_integral(func=abs_vector_integral_from_any_to_any, limits=limits,\n", " priority=51,\n", " supports_norm_range=False, # False by default, but could be set to\n", diff --git a/_website/tutorials/components/80 - Toy Study.ipynb b/_website/tutorials/components/80 - Toy Study.ipynb index feb7c91..b8afe22 100644 --- a/_website/tutorials/components/80 - Toy Study.ipynb +++ b/_website/tutorials/components/80 - Toy Study.ipynb @@ -27,7 +27,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We gonna build a simple model, just a Gaussian. But, given the well defined workflow of zfit, `model` can be exchanged by _any_ complicated composition or custom model." + "We will build a simple model, just a Gaussian. But, given the well defined workflow of zfit, `model` can be exchanged by _any_ complicated composition or custom model." ] }, { @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "obs = zfit.Space('x', (-5, 5))\n", + "obs = zfit.Space('x', -5, 5)\n", "\n", "sigma = zfit.Parameter('sigma', 1, 0.1, 10)\n", "mu = zfit.Parameter('mu', 0, -1, 1)\n", @@ -56,14 +56,14 @@ "metadata": {}, "outputs": [], "source": [ - "sampler = model.create_sampler(n=3000, fixed_params=True)" + "sampler = model.create_sampler(n=3000)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "So far, no sampling happened yet. But first, we build our whole chain, just using our sampler as `data`." + "This takes a while, as the first resampling is happening now. Next, we build our whole chain, just using our sampler as `data`."
] }, { @@ -74,8 +74,8 @@ "source": [ "nll = zfit.loss.UnbinnedNLL(model, sampler)\n", "\n", - "from zfit.minimize import \\\n", - " DefaultToyStrategy # this stategy does not raise an error with NaNs but returns a non-converged `FitResult`\n", + "# this strategy does not raise an error with NaNs but returns a non-converged `FitResult`\n", + "from zfit.minimize import DefaultToyStrategy\n", "\n", "minimizer = zfit.minimize.Minuit(strategy=DefaultToyStrategy(), verbosity=0, tol=1e-3, use_minuit_grad=True)" ] }, diff --git a/_website/tutorials/components/90 - Serialization basics.ipynb b/_website/tutorials/components/90 - Serialization basics.ipynb index 18365e0..e83adbf 100644 --- a/_website/tutorials/components/90 - Serialization basics.ipynb +++ b/_website/tutorials/components/90 - Serialization basics.ipynb @@ -50,7 +50,7 @@ "source": [ "mu = zfit.Parameter(\"mu\", 1.2, -4, 5)\n", "sigma = zfit.Parameter(\"sigma\", 3, 0, 10)\n", - "obs = zfit.Space(\"obs1\", limits=(-10, 20))\n", + "obs = zfit.Space(\"obs1\", -10, 20)\n", "model = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)" ] }, @@ -91,7 +91,7 @@ }, "outputs": [], "source": [ - "x = np.linspace(*obs.limit1d, 1000)\n", + "x = np.linspace(*obs.v1.limits, 1000)\n", "mu.set_value(1.5)\n", "sigma.set_value(2)\n", "mplhep.histplot(data.to_binned(50), density=True, label=\"data\")\n", @@ -391,7 +391,7 @@ "\n", "with asdf.open(\"data.asdf\") as f:\n", " tree = f.tree\n", - " data = zfit.Data.from_asdf(f)\n" + " data = zfit.Data.from_asdf(f)" ] }, { @@ -504,6 +504,13 @@ "source": [ "zfit.hs3.loads(hs3dumped)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/_website/tutorials/guides/constraints_simultaneous_fit_discovery_splot.ipynb b/_website/tutorials/guides/constraints_simultaneous_fit_discovery_splot.ipynb index f53672f..9e94b92 100644 --- a/_website/tutorials/guides/constraints_simultaneous_fit_discovery_splot.ipynb +++ b/_website/tutorials/guides/constraints_simultaneous_fit_discovery_splot.ipynb @@ -36,9 +36,9 @@ "import numpy as np\n", "import particle.literals as lp\n", "import tensorflow as tf\n", - "import zfit\n", + "import zfit.z.numpy\n", "\n", - "plt.rcParams['figure.figsize'] = (8,6)" + "plt.rcParams['figure.figsize'] = (8, 6)" ] }, { @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "mu_true = lp.B_plus.mass * u.MeV\n", + "mu_true = lp.B_plus.mass * u.MeV\n", "sigma_true = 50 * u.MeV\n", "\n", "# number of signal and background\n", @@ -64,7 +64,7 @@ "# create some data\n", "signal_np = np.random.normal(loc=mu_true, scale=sigma_true, size=n_sig_rare)\n", "bkg_np_raw = np.random.exponential(size=20000, scale=700)\n", - "bkg_np = bkg_np_raw[bkg_np_raw<1000][:n_bkg_rare] + 5000 # just cutting right, but zfit could also cut" + "bkg_np = bkg_np_raw[bkg_np_raw < 1000][:n_bkg_rare] + 5000 # just cutting right, but zfit could also cut" ] }, { @@ -74,7 +74,7 @@ "outputs": [], "source": [ "# Firstly, the observable and its range is defined\n", - "obs = zfit.Space('Bmass', (5000, 6000)) # for whole range" + "obs = zfit.Space('Bmass', 5000, 6000) # for whole range" ] }, { @@ -83,8 +83,12 @@ "metadata": {}, "outputs": [], "source": [ - "# load data into zfit\n", - "data = zfit.Data.from_numpy(obs=obs, array=np.concatenate([signal_np, bkg_np], axis=0))" + "# load data into zfit and let zfit concatenate the data\n", + "signal_data = zfit.Data(signal_np, obs=obs)\n", + "bkg_data = zfit.Data(bkg_np, obs=obs)\n", + "data =
zfit.data.concat([signal_data, bkg_data])\n", + "# (we could also do it manually)\n", + "# data = zfit.Data(array=np.concatenate([signal_np, bkg_np], axis=0), obs=obs)" ] }, { @@ -96,20 +100,16 @@ "# Parameters are specified: (name (unique), initial, lower, upper) whereas lower, upper are optional\n", "mu = zfit.Parameter('mu', 5279, 5100, 5400)\n", "sigma = zfit.Parameter('sigma', 20, 1, 200)\n", - "signal = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)\n", + "sig_yield = zfit.Parameter('sig_yield', n_sig_rare + 30,\n", + " step_size=3) # step size: default is small, use appropriate\n", + "signal = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs, extended=sig_yield)\n", "\n", "lam = zfit.Parameter('lambda', -0.002, -0.1, -0.00001, step_size=0.001) # floating, also without limits\n", - "comb_bkg = zfit.pdf.Exponential(lam, obs=obs)\n", - "\n", - "sig_yield = zfit.Parameter('sig_yield', n_sig_rare + 30,\n", - " step_size=3) # step size: default is small, use appropriate\n", "bkg_yield = zfit.Parameter('bkg_yield', n_bkg_rare - 40, step_size=1)\n", - "# Create extended PDFs\n", - "extended_sig = signal.create_extended(sig_yield)\n", - "extended_bkg = comb_bkg.create_extended(bkg_yield)\n", + "comb_bkg = zfit.pdf.Exponential(lam, obs=obs, extended=bkg_yield)\n", "\n", "# The final model is the combination of the signal and backgrond PDF\n", - "model = zfit.pdf.SumPDF([extended_bkg, extended_sig])" + "model = zfit.pdf.SumPDF([comb_bkg, signal])" ] }, { @@ -118,7 +118,7 @@ "metadata": {}, "outputs": [], "source": [ - "constraint = zfit.constraint.GaussianConstraint(mu, observation=5275 * u.MeV, uncertainty=15 * u.MeV)" + "constraint = zfit.constraint.GaussianConstraint(mu, observation=5275 * u.MeV, sigma=15 * u.MeV)" ] }, { @@ -128,7 +128,7 @@ "outputs": [], "source": [ "nll = zfit.loss.ExtendedUnbinnedNLL(model, data, constraints=constraint)\n", - "minimizer = zfit.minimize.Minuit(gradient=True)\n", + "minimizer = zfit.minimize.Minuit(gradient=\"zfit\")\n", "result = minimizer.minimize(nll)\n", "result.hesse();" ] @@ -139,7 +139,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(result.params)" + "print(result)" ] }, { @@ -182,11 +182,13 @@ "# create some data\n", "signal_np_reso = np.random.normal(loc=mu_true, scale=sigma_true * 0.7, size=n_sig_reso)\n", "bkg_np_raw_reso = np.random.exponential(size=20000, scale=900)\n", - "bkg_np_reso = bkg_np_raw_reso[bkg_np_raw_reso<1000][:n_bkg_reso] + 5000\n", + "bkg_np_reso = bkg_np_raw_reso[bkg_np_raw_reso < 1000][:n_bkg_reso] + 5000\n", "\n", "# load data into zfit\n", - "obs_reso = zfit.Space('Bmass_reso', (5000, 6000))\n", - "data_reso = zfit.Data.from_numpy(obs=obs_reso, array=np.concatenate([signal_np_reso, bkg_np_reso], axis=0))" + "obs_reso = zfit.Space('Bmass_reso', 5000, 6000)\n", + "signal_data_reso = zfit.Data(signal_np_reso, obs=obs_reso)\n", + "bkg_data_reso = zfit.Data(bkg_np_reso, obs=obs_reso)\n", + "data_reso = zfit.data.concat([signal_data_reso, bkg_data_reso])" ] }, { @@ -235,7 +237,8 @@ "\n", "sigma_scaled = zfit.ComposedParameter('sigma scaled', # name\n", " sigma_scaled_fn, # function\n", - " params=[sigma, sigma_scaling] # the objects used inside the function\n", + " params=[sigma, sigma_scaling], # the objects used inside the function\n", + " unpack_params=True # we could also just use a `params` argument, a dict\n", " )" ] }, @@ -245,22 +248,19 @@ "metadata": {}, "outputs": [], "source": [ + "reso_sig_yield = zfit.Parameter('reso_sig_yield', n_sig_reso - 100, 0, n_sig_reso * 3,\n", + " step_size=1)\n", "signal_reso = 
zfit.pdf.Gauss(mu=mu, # the same as for the rare mode\n", " sigma=sigma_scaled,\n", - " obs=obs_reso\n", - " )\n", - "\n", - "lambda_reso = zfit.Parameter('lambda_reso', -0.002, -0.01, 0.0001) # floating\n", - "comb_bkg_reso_pdf = zfit.pdf.Exponential(lambda_reso, obs=obs_reso)\n", + " obs=obs_reso,\n", + " extended=reso_sig_yield)\n", "\n", - "reso_sig_yield = zfit.Parameter('reso_sig_yield', n_sig_reso - 100, 0, n_sig_reso * 3,\n", - " step_size=1) # step size: default is small, use appropriate\n", + "lambda_reso = zfit.Parameter('lambda_reso', -0.002, -0.01, 0.0001)\n", "reso_bkg_yield = zfit.Parameter('reso_bkg_yield', n_bkg_reso + 70, 0, 2e5, step_size=1)\n", + "comb_bkg_reso = zfit.pdf.Exponential(lambda_reso, obs=obs_reso, extended=reso_bkg_yield)\n", + "\n", "\n", - "# Create the extended models\n", - "extended_sig_reso = signal_reso.create_extended(reso_sig_yield)\n", - "extended_bkg_reso = comb_bkg_reso_pdf.create_extended(reso_bkg_yield)\n", - "model_reso = zfit.pdf.SumPDF([extended_bkg_reso, extended_sig_reso])" + "model_reso = zfit.pdf.SumPDF([comb_bkg_reso, signal_reso])" ] }, { @@ -294,7 +294,7 @@ "metadata": {}, "outputs": [], "source": [ - "extended_sig_reso.get_yield()" + "signal_reso.get_yield()" ] }, { @@ -339,10 +339,11 @@ "metadata": {}, "outputs": [], "source": [ - "# Sets the values of the parameters to the result of the simultaneous fit\n", + "# Sets the values of the parameters to the result of the simultaneous fit\n", "# in case they were modified.\n", "zfit.param.set_values(nll_simultaneous.get_params(), result_simultaneous)\n", "\n", + "\n", "def plot_fit_projection(model, data, nbins=30, ax=None):\n", " # The function will be reused.\n", " if ax is None:\n", @@ -371,6 +372,7 @@ "\n", " return ax\n", "\n", + "\n", "fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n", "\n", "for mod, dat, ax, nb in zip(nll_simultaneous.model, nll_simultaneous.data, axs, [30, 60]):\n", @@ -405,7 +407,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we would like to compute the significance of this observation or in other words the probabilty that this observation is the result of the statistical fluctuation. To do so we have to perform an hypothesis test where the null and alternative hypotheses are defined as:\n", + "Now we would like to compute the significance of this observation or, in other words, the probability that this observation is the result of a statistical fluctuation. To do so we have to perform a hypothesis test where the null and alternative hypotheses are defined as:\n", "\n", "* $H_{0}$, the null or background only hypothesis, i.e.
$N_{sig} = 0$;\n", "* $H_{1}$, the alternative hypothesis, i.e $N_{sig} = \\hat{N}_{sig}$, where $\\hat{N}_{sig}$ is the fitted value of $N_{sig}$ printed above.\n", @@ -521,7 +523,7 @@ "source": [ "# Sets the values of the parameters to the result of the simultaneous fit\n", "zfit.param.set_values(nll_simultaneous.get_params(), result_simultaneous)\n", - "sigma_scaling.floating=False\n", + "sigma_scaling.floating = False\n", "\n", "# Creates a sampler that will draw events from the model\n", "sampler = model.create_sampler()\n", @@ -587,7 +589,7 @@ "from hepstats.hypotests import UpperLimit\n", "from hepstats.hypotests.parameters import POIarray\n", "\n", - "# Background only hypothesis.\n", + "#Background only hypothesis.\n", "bkg_only = POI(sig_yield, 0)\n", "# Range of Nsig values to scan.\n", "sig_yield_scan = POIarray(sig_yield, np.linspace(0, 70, 10))\n", @@ -630,12 +632,12 @@ "source": [ "# Signal distributions.\n", "nsig_sw = 20000\n", - "np_sig_m_sw = signal_reso.sample(nsig_sw).numpy().reshape(-1,)\n", + "np_sig_m_sw = signal_reso.sample(nsig_sw)[\"Bmass_reso\"]\n", "np_sig_t_sw = np.random.exponential(size=nsig_sw, scale=1)\n", "\n", "# Background distributions.\n", "nbkg_sw = 150000\n", - "np_bkg_m_sw = comb_bkg_reso_pdf.sample(nbkg_sw).numpy().reshape(-1,)\n", + "np_bkg_m_sw = comb_bkg_reso.sample(nbkg_sw)[\"Bmass_reso\"]\n", "np_bkg_t_sw = np.random.normal(size=nbkg_sw, loc=2.0, scale=2.5)\n", "\n", "# Lifetime cut.\n", @@ -673,11 +675,11 @@ "outputs": [], "source": [ "# Builds the loss.\n", - "data_sw = zfit.Data.from_numpy(obs=obs_reso, array=np_m_sw)\n", + "data_sw = zfit.Data(np_m_sw, obs=obs_reso)\n", "nll_sw = zfit.loss.ExtendedUnbinnedNLL(model_reso, data_sw)\n", "\n", - "# This parameter was useful in the simultaneous fit but not anymore so we fix it.\n", - "sigma_scaling.floating=False\n", + "#This parameter was useful in the simultaneous fit but not anymore so we fix it.\n", + "sigma_scaling.floating = False\n", "\n", "# Minimizes the loss.\n", "result_sw = minimizer.minimize(nll_sw)\n", diff --git a/_website/tutorials/guides/custom_models.ipynb b/_website/tutorials/guides/custom_models.ipynb index ea9adfc..cb33ff3 100644 --- a/_website/tutorials/guides/custom_models.ipynb +++ b/_website/tutorials/guides/custom_models.ipynb @@ -18,7 +18,7 @@ "\n", "Following the philosophy of zfit, there are different levels of customization. For the most simple use-case, all we need to do is to provide a function describing the shape and the name of the parameters. This can be done by overriding `_unnormalized_pdf`.\n", "\n", - "To implement a mathematical function in zfit, TensorFlow or z should be used. The latter is a subset of TensorFlow and improves it in some aspects, such as automatic dtype casting, and therefore preferred to use.\n", + "To implement a mathematical function in zfit, use `znp`, a numpy-like interface, or `z`, the backend. This ensures that the function can be traced and the gradient can be calculated. If a function is not available, `tf` can also be used.\n", "(_There are other ways to use arbitrary Python functions, they will be discussed later on_)."
] }, @@ -36,6 +36,7 @@ "import numpy as np\n", "import tensorflow as tf\n", "import zfit\n", + "import zfit.z.numpy as znp\n", "from zfit import z" ] }, @@ -68,10 +69,11 @@ " \"\"\"Second order polynomial `a + b * x + c * x^2`\"\"\"\n", " _PARAMS = ['b', 'c'] # specify which parameters to take\n", "\n", - " def _unnormalized_pdf(self, x): # implement function, unnormalized\n", - " data = z.unstack_x(x)\n", - " b = self.params['b']\n", - " c = self.params['c']\n", + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params): # implement function, unnormalized\n", + " data = x[0] # axis 0\n", + " b = params['b']\n", + " c = params['c']\n", "\n", " return 1 + b * data + c * data ** 2" ] @@ -99,7 +101,7 @@ }, "outputs": [], "source": [ - "obs = zfit.Space(\"obs1\", limits=(-4, 4))\n", + "obs = zfit.Space(\"obs1\", -4, 4)\n", "\n", "b = zfit.Parameter('b', 0.2, 0.1, 10)\n", "custom_poly = SecondOrderPoly(obs=obs, b=b, c=1.4)" @@ -156,17 +158,18 @@ "outputs": [], "source": [ "# define the integral function\n", + "\n", + "\n", "def cdf_poly(limit, b, c):\n", " return limit + 0.5 * b * limit ** 2 + 1 / 3 * c * limit ** 3\n", "\n", + "\n", "def integral_func(limits, norm_range, params, model):\n", "\n", " b = params['b']\n", " c = params['c']\n", "\n", - " lower, upper = limits.limit1d\n", - " lower = z.convert_to_tensor(lower) # the limits are now 1-D, for axis 1\n", - " upper = z.convert_to_tensor(upper)\n", + " lower, upper = limits.v1.limits\n", "\n", " # calculate the integral\n", " integral = cdf_poly(upper, b, c) - cdf_poly(lower, b, c)\n", @@ -265,17 +268,20 @@ " _PARAMS = ['FL', 'S3', 'S4', 'S5', 'AFB', 'S7', 'S8', 'S9']\n", " _N_OBS = 3\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " FL = self.params['FL']\n", - " S3 = self.params['S3']\n", - " S4 = self.params['S4']\n", - " S5 = self.params['S5']\n", - " AFB = self.params['AFB']\n", - " S7 = self.params['S7']\n", - " S8 = self.params['S8']\n", - " S9 = self.params['S9']\n", - "\n", - " costheta_l, costheta_k, phi = z.unstack_x(x)\n", + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " FL = params['FL']\n", + " S3 = params['S3']\n", + " S4 = params['S4']\n", + " S5 = params['S5']\n", + " AFB = params['AFB']\n", + " S7 = params['S7']\n", + " S8 = params['S8']\n", + " S9 = params['S9']\n", + "\n", + " costheta_l = x[0]\n", + " costheta_k = x[1]\n", + " phi = x[2]\n", "\n", " sintheta_k = tf.sqrt(1.0 - costheta_k * costheta_k)\n", " sintheta_l = tf.sqrt(1.0 - costheta_l * costheta_l)\n", @@ -291,13 +297,13 @@ " FL * costheta_k * costheta_k +\n", " (1.0 / 4.0) * (1.0 - FL) * sintheta_2k * cos2theta_l +\n", " -1.0 * FL * costheta_k * costheta_k * cos2theta_l +\n", - " S3 * sintheta_2k * sintheta_2l * tf.cos(2.0 * phi) +\n", - " S4 * sin2theta_k * sin2theta_l * tf.cos(phi) +\n", - " S5 * sin2theta_k * sintheta_l * tf.cos(phi) +\n", + " S3 * sintheta_2k * sintheta_2l * znp.cos(2.0 * phi) +\n", + " S4 * sin2theta_k * sin2theta_l * znp.cos(phi) +\n", + " S5 * sin2theta_k * sintheta_l * znp.cos(phi) +\n", " (4.0 / 3.0) * AFB * sintheta_2k * costheta_l +\n", - " S7 * sin2theta_k * sintheta_l * tf.sin(phi) +\n", - " S8 * sin2theta_k * sin2theta_l * tf.sin(phi) +\n", - " S9 * sintheta_2k * sintheta_2l * tf.sin(2.0 * phi))\n", + " S7 * sin2theta_k * sintheta_l * znp.sin(phi) +\n", + " S8 * sin2theta_k * sin2theta_l * znp.sin(phi) +\n", + " S9 * sintheta_2k * sintheta_2l * znp.sin(2.0 * phi))\n", "\n", " return pdf" ] @@ -664,9 +670,11 @@ "source": [ "x_tf = z.constant(42.)\n", "\n", + "\n", "def sqrt(x):\n", " 
return np.sqrt(x)\n", "\n", + "\n", "y = z.py_function(func=sqrt, inp=[x_tf], Tout=tf.float64)" ] }, @@ -758,12 +766,13 @@ "class NumpyGauss(zfit.pdf.ZPDF):\n", " _PARAMS = ['mu', 'sigma']\n", "\n", - " def _unnormalized_pdf(self, x):\n", + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", " zfit.run.assert_executing_eagerly() # make sure we're eager\n", - " data = z.unstack_x(x)\n", - " mu = self.params['mu']\n", - " sigma = self.params['sigma']\n", - " return z.convert_to_tensor(np.exp( - 0.5 * (data - mu) ** 2 / sigma ** 2))" + " data = x[0]\n", + " mu = params['mu']\n", + " sigma = params['sigma']\n", + " return np.exp( - 0.5 * (data - mu) ** 2 / sigma ** 2) # note that we use numpy here" ] }, { @@ -774,7 +783,7 @@ } }, "source": [ - "Make sure to return a Tensor again, otherwise there will be an error." + "This can be tested and compared." ] }, { diff --git a/_website/tutorials/introduction/Introduction.ipynb b/_website/tutorials/introduction/Introduction.ipynb index e4e7a4b..bcd1757 100644 --- a/_website/tutorials/introduction/Introduction.ipynb +++ b/_website/tutorials/introduction/Introduction.ipynb @@ -49,7 +49,7 @@ "\n", "zfit knows unbinned and binned datasets.\n", "\n", - "The unbinned `Data` can load data from various sources, most notably from Numpy, Pandas DataFrame, TensorFlow Tensor and ROOT (using uproot). It is also possible, for convenience, to convert it directly `to_pandas`. The constructors are named `from_numpy`, `from_root` etc." + "The unbinned `Data` can load data from various sources, such as Numpy, Pandas DataFrame, TensorFlow Tensor and ROOT (using uproot). It is also possible, for convenience, to convert it directly `to_pandas`. The constructors are named `from_numpy`, `from_root` etc." ] }, { @@ -71,7 +71,9 @@ }, "outputs": [], "source": [ - "obs = zfit.Space('obs1', (-5, 10))" + "obs = zfit.Space('obs1', -5, 10)\n", + "# or more explicitly\n", + "obs = zfit.Space(obs='obs1', lower=-5, upper=10)" ] }, { @@ -79,7 +81,7 @@ "metadata": {}, "source": [ "This `Space` has limits and offers the following functionality:\n", - "- area(): calculate the area (e.g. distance between upper and lower)\n", + "- volume(): calculate the volume (e.g. 
distance between upper and lower)\n", "- inside(): return a boolean Tensor corresponding to whether the value is _inside_ the `Space`\n", "- filter(): filter the input values to only return the one inside" ] }, { @@ -97,7 +99,7 @@ "size_normal = 10_000\n", "data_normal_np = np.random.normal(size=size_normal, scale=2)\n", "\n", - "data_normal = zfit.Data.from_numpy(obs=obs, array=data_normal_np)" + "data_normal = zfit.Data(data_normal_np, obs=obs)" ] }, { @@ -106,11 +108,15 @@ "The main functionality is\n", "- nevents: attribute that returns the number of events in the object\n", - "- data_range: a `Space` that defines the limits of the data; if outside, the data will be cut\n", - "- n_obs: defines the number of dimensions in the dataset\n", - "- with_obs: returns a subset of the dataset with only the given obs\n", + "- space: a `Space` that defines the limits of the data; if outside, the data will be cut (!)\n", + "- n_obs: the number of dimensions in the dataset\n", + "- with_obs: returns a dataset, possibly a subset of the dataset with only the given obs\n", + "- `with_weights`: returns a dataset with the given weights\n", "- weights: event weights\n", "\n", + "Using indexing `data[obs]` will return a `Data` object with only the given observables and behaves similarly to Pandas DataFrames.\n", + "A string, i.e. a single observable, returns an array; a list of strings returns a `Data` object with the given observables.\n", + "\n", "Furthermore, `value` returns a Tensor with shape `(nevents, n_obs)`." ] }, @@ -377,9 +383,11 @@ "source": [ "### Tensors\n", "\n", - "As we see, many zfit functions return Tensors. This is however no magical thing! If we're outside of models, then we can always safely convert them to a numpy array by calling `zfit.run(...)` on it (or any structure containing potentially multiple Tensors) or simply `np.array`. However, this may not even be required often! They can be added just like numpy arrays and interact well with Python and Numpy:\n", + "As we see, many zfit functions return Tensors. Usually, these are `array-like` objects that resemble numpy arrays close enough that they can be directly used in place of actual numpy arrays.\n", + "\n", + "If explicitly needed, we can always safely convert them to a numpy array by calling `np.asarray(tensor)`.
\n", "\n", - "[**Extended tutorial on TensorFlow**](https://zfit-tutorials.readthedocs.io/en/master/tutorials/TensorFlow/HPC_with_TensorFlow.html)" + "More information about the backend can be found in [**Extended tutorial on TensorFlow**](https://zfit-tutorials.readthedocs.io/en/master/tutorials/TensorFlow/HPC_with_TensorFlow.html)" ] }, { @@ -392,7 +400,7 @@ }, "outputs": [], "source": [ - "np.sqrt(integral)" + "np.sqrt(integral) # works out-of-the-box" ] }, { @@ -606,7 +614,7 @@ }, "outputs": [], "source": [ - "mass_obs = zfit.Space('mass', (0, 1000))" + "mass_obs = zfit.Space('mass', 0, 1000)" ] }, { @@ -834,7 +842,10 @@ "outputs": [], "source": [ "yield_model = zfit.Parameter('yield_model', 10000, 0, 20000, step_size=10)\n", - "model_ext = model.create_extended(yield_model)" + "# model_ext = model.create_extended(yield_model) # using an existing model\n", + "\n", + "# when creating a new model \n", + "model_ext = zfit.pdf.SumPDF([signal, comb_bkg, part_reco], [sig_frac, comb_bkg_frac], extended=yield_model)" ] }, { diff --git a/_website/tutorials/introduction/Introduction_long.ipynb b/_website/tutorials/introduction/Introduction_long.ipynb index 92c73fd..c7c665e 100644 --- a/_website/tutorials/introduction/Introduction_long.ipynb +++ b/_website/tutorials/introduction/Introduction_long.ipynb @@ -104,7 +104,7 @@ }, "outputs": [], "source": [ - "obs = zfit.Space('obs1', (-5, 10))" + "obs = zfit.Space('obs1', -5, 10)" ] }, { diff --git a/_website/tutorials/introduction/Quickstart.ipynb b/_website/tutorials/introduction/Quickstart.ipynb index 347104e..eb97684 100644 --- a/_website/tutorials/introduction/Quickstart.ipynb +++ b/_website/tutorials/introduction/Quickstart.ipynb @@ -80,7 +80,7 @@ }, "outputs": [], "source": [ - "obs = zfit.Space('x', limits=(-10, 10))" + "obs = zfit.Space('x', -10, 10)" ] }, { @@ -186,14 +186,7 @@ } }, "source": [ - "This pdf contains several useful methods, such as calculating a probability, calculating its integral, sampling etc.\n", - "\n", - "**Note**: Several objects that are returned from methods, like `integrate`, return `tf.Tensor`, which are wrapped Numpy arrays.\n", - "They can directly be used as such or explicitly converted to by calling:\n", - "\n", - "```python\n", - "zfit.run(TensorFlow_object)\n", - "```" + "This pdf contains several useful methods, such as calculating a probability, calculating its integral, sampling etc." 
] }, { @@ -209,9 +202,7 @@ "metadata": {}, "outputs": [], "source": [ "# Let's get some probabilities.\n", "consts = [-1, 0, 1]\n", "probs = gauss.pdf(consts)\n", - "# And now execute the tensorflow graph\n", - "result = zfit.run(probs)\n", - "print(f\"x values: {consts}\\nresult: {result}\")" + "print(f\"x values: {consts}\\nresult: {probs}\")" ] }, { @@ -300,8 +291,8 @@ "range_ = (-5,5)\n", "_ = plt.hist(data_np, bins=n_bins, range=range_)\n", "x = np.linspace(*range_, num=1000)\n", - "pdf = zfit.run(gauss.pdf(x))\n", - "_ = plt.plot(x, data_np.shape[0] / n_bins * (range_[1] - range_[0]) * pdf)" + "probs = gauss.pdf(x)\n", + "_ = plt.plot(x, data_np.shape[0] / n_bins * (range_[1] - range_[0]) * probs)" ] }, { diff --git a/components/05 - Exploring the FitResult.ipynb b/components/05 - Exploring the FitResult.ipynb index 96b67e7..9f5fc5c 100644 --- a/components/05 - Exploring the FitResult.ipynb +++ b/components/05 - Exploring the FitResult.ipynb @@ -35,15 +35,11 @@ "metadata": {}, "outputs": [], "source": [ - "obs = zfit.Space('x', limits=(0, 10))\n", + "obs = zfit.Space('x', 0, 10)\n", "mu = zfit.Parameter('mu', 5, 0, 10)\n", "sigma = zfit.Parameter('sigma', 1, 0, 10)\n", "nsig = zfit.Parameter('nsig', 1000, 0, 10000)\n", - "gauss_nonext = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=sigma,\n", - " # extended=nsig # requires zfit>=0.13\n", - " )\n", - "gauss = gauss_nonext.create_extended(nsig)\n", - "\n", + "gauss = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=sigma, extended=nsig)\n", "data = gauss.sample()\n", "print(f\"The sampled data (poisson fluctuated) has {data.nevents} events.\")" ] @@ -319,7 +315,7 @@ "outputs": [], "source": [ "weighted_data = zfit.Data.from_tensor(obs=obs, tensor=data.value(), weights=znp.random.uniform(0.1, 5, size=(data.nevents,)))\n", - "weighted_nll = zfit.loss.UnbinnedNLL(model=gauss_nonext, data=weighted_data)\n", + "weighted_nll = zfit.loss.UnbinnedNLL(model=gauss, data=weighted_data)\n", "weighted_result = minimizer.minimize(weighted_nll)" ] }, diff --git a/components/20 - Composite Models.ipynb b/components/20 - Composite Models.ipynb index 761f3d5..f972cea 100644 --- a/components/20 - Composite Models.ipynb +++ b/components/20 - Composite Models.ipynb @@ -33,7 +33,7 @@ "source": [ "frac = zfit.Parameter(\"frac_gauss\", 0.5, 0, 1)\n", "\n", - "obs1 = zfit.Space('obs1', limits=(-5, 5))\n", + "obs1 = zfit.Space('obs1', -5, 5)\n", "\n", "mu1 = zfit.Parameter(\"mu1\", 1.)\n", "sigma1 = zfit.Parameter(\"sigma1\", 1.)\n", @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "obs2 = zfit.Space('obs2', limits=(-3, 7))\n", + "obs2 = zfit.Space('obs2', -3, 7)\n", "mu3 = zfit.Parameter(\"mu3\", 1.)\n", "sigma3 = zfit.Parameter(\"sigma3\", 1.)\n", "gauss3 = zfit.pdf.Gauss(obs=obs2, mu=mu3, sigma=sigma3) # different obs than above."
diff --git a/components/30 - Binned models.ipynb b/components/30 - Binned models.ipynb index 26feeea..1c88ee3 100644 --- a/components/30 - Binned models.ipynb +++ b/components/30 - Binned models.ipynb @@ -52,7 +52,7 @@ "\n", "normal_np = np.random.normal(loc=2., scale=3., size=10000)\n", "\n", - "obs = zfit.Space(\"x\", limits=(-10, 10))\n", + "obs = zfit.Space(\"x\", -10, 10)\n", "\n", "mu = zfit.Parameter(\"mu\", 1., -4, 6)\n", "sigma = zfit.Parameter(\"sigma\", 1., 0.1, 10)\n", @@ -81,7 +81,7 @@ "obs_bin = zfit.Space(\"x\", binning=binning)\n", "\n", "data = data_nobin.to_binned(obs_bin)\n", - "model = zfit.pdf.BinnedFromUnbinnedPDF(model_nobin, obs_bin)\n", + "model = model_nobin.to_binned(obs_bin)\n", "loss = zfit.loss.BinnedNLL(model, data)" ] }, @@ -160,7 +160,7 @@ "\n", "plt.figure()\n", "mplhep.histplot(model_hist, density=1, label=\"model\")\n", - "mplhep.histplot(data.to_hist(), density=1, label=\"data\")\n", + "mplhep.histplot(data, density=1, label=\"data\")\n", "plt.legend()\n", "plt.title(\"After fit\")" ] diff --git a/components/33 - Binned fits.ipynb b/components/33 - Binned fits.ipynb index ff9a8dc..2f7976a 100644 --- a/components/33 - Binned fits.ipynb +++ b/components/33 - Binned fits.ipynb @@ -64,7 +64,7 @@ "source": [ "normal_np = np.random.normal(loc=2., scale=1.3, size=10000)\n", "\n", - "obs = zfit.Space(\"x\", limits=(-10, 10))\n", + "obs = zfit.Space(\"x\", -10, 10)\n", "\n", "mu = zfit.Parameter(\"mu\", 1., -4, 6)\n", "sigma = zfit.Parameter(\"sigma\", 1., 0.1, 10)\n", @@ -487,8 +487,7 @@ "metadata": {}, "outputs": [], "source": [ - "bkg_hist = zfit.Data.from_numpy(obs=obs, array=np.random.exponential(scale=20, size=100_000) - 10).to_binned(\n", - " obs_binned)\n", + "bkg_hist = zfit.Data(np.random.exponential(scale=20, size=100_000) - 10, obs=obs_binned)\n", "bkg_hist_m1 = zfit.Data.from_numpy(obs=obs,\n", " array=np.random.exponential(scale=35, size=100_000) - 10).to_binned(\n", " obs_binned)\n", @@ -571,8 +570,8 @@ "outputs": [], "source": [ "modifier_constraints = zfit.constraint.GaussianConstraint(params=list(modifiers.values()), observation=np.ones(len(modifiers)),\n", - " uncertainty=np.ones(len(modifiers)))\n", - "# alpha_constraint = zfit.constraint.GaussianConstraint(alpha, 0, 1)" + " uncertainty=0.1 * np.ones(len(modifiers)))\n", + "alpha_constraint = zfit.constraint.GaussianConstraint(alpha, 0, 1)" ] }, { @@ -581,7 +580,7 @@ "metadata": {}, "outputs": [], "source": [ - "loss_binned = zfit.loss.ExtendedBinnedNLL(model, data, constraints=modifier_constraints)" + "loss_binned = zfit.loss.ExtendedBinnedNLL(model, data, constraints=[modifier_constraints, alpha_constraint])" ] }, { diff --git a/components/50 - Custom code and run mode.ipynb b/components/50 - Custom code and run mode.ipynb index 1a4c5d3..0a35160 100644 --- a/components/50 - Custom code and run mode.ipynb +++ b/components/50 - Custom code and run mode.ipynb @@ -51,6 +51,7 @@ "import numpy as np\n", "import tensorflow as tf\n", "import zfit\n", + "import zfit.z.numpy as znp # this is numpy-like\n", "from zfit import z # this is basically tf, just wrapped" ] }, @@ -166,7 +167,7 @@ "metadata": {}, "outputs": [], "source": [ - "graph_func(z.constant(5))" + "graph_func(znp.array(5))" ] }, { @@ -175,7 +176,7 @@ "metadata": {}, "outputs": [], "source": [ - "graph_func(z.constant(7))" + "graph_func(znp.array(7))" ] }, { @@ -210,7 +211,7 @@ "outputs": [], "source": [ "try:\n", - " graph_func_fail(z.constant(5.))\n", + " graph_func_fail(znp.array(5.))\n", "except NotImplementedError as error:\n", " 
print(f\"Error was raised, last line: {error}\")" ] @@ -262,7 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "graph_func_fail(z.constant(5))" + "graph_func_fail(znp.array(5))" ] }, { @@ -299,6 +300,7 @@ "def numpy_func(x, a):\n", " return np.square(x) * a\n", "\n", + "\n", "@z.function\n", "def wrapped_numpy_func(x_tensor, a_tensor):\n", " result = z.py_function(func=numpy_func, inp=[x_tensor, a_tensor], Tout=zfit.ztypes.float) # or tf.float64\n", diff --git a/components/60 - Custom PDF.ipynb b/components/60 - Custom PDF.ipynb index 8c5c051..9899c83 100644 --- a/components/60 - Custom PDF.ipynb +++ b/components/60 - Custom PDF.ipynb @@ -44,11 +44,12 @@ " _N_OBS = 1 # dimension, can be omitted\n", " _PARAMS = ['mean', 'std'] # the name of the parameters\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " x = z.unstack_x(x) # returns a list with the columns: do x, y, z = z.unstack_x(x) for 3D\n", - " mean = self.params['mean']\n", - " std = self.params['std']\n", - " return z.exp(- ((x - mean) / std) ** 2)" + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " x0 = x[0] # using the 0th axis\n", + " mean = params['mean']\n", + " std = params['std']\n", + " return z.exp(- ((x0 - mean) / std) ** 2)" ] }, { @@ -70,18 +71,19 @@ "source": [ "class MyGauss(zfit.pdf.BasePDF):\n", "\n", - " def __init__(self, mean, std, obs, extended=None, norm=None, name=None):\n", + " def __init__(self, mean, std, obs, extended=None, norm=None, name=None, label=None):\n", " params = {'mean': mean, # 'mean' is the name as it will be named in the PDF, mean is just the parameter to create the PDF\n", " 'std': std\n", " }\n", " super().__init__(obs=obs, params=params, extended=extended, norm=norm,\n", - " name=name)\n", + " name=name, label=label)\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " x = z.unstack_x(x)\n", - " mean = self.params['mean']\n", - " std = self.params['std']\n", - " return z.exp(- ((x - mean) / std) ** 2)" + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " x0 = x[0] # using the 0th axis\n", + " mean = params['mean']\n", + " std = params['std']\n", + " return z.exp(- ((x0 - mean) / std) ** 2)" ] }, { @@ -90,10 +92,10 @@ "metadata": {}, "outputs": [], "source": [ - "obs = zfit.Space('obs1', limits=(-3, 6))\n", + "obs = zfit.Space('obs1', -3, 6)\n", "\n", "data_np = np.random.random(size=1000)\n", - "data = zfit.data.Data.from_numpy(array=data_np, obs=obs)" + "data = zfit.Data(data_np, obs=obs)" ] }, { @@ -153,7 +155,7 @@ "outputs": [], "source": [ "def gauss_integral_from_any_to_any(limits, params, model):\n", - " lower, upper = limits.limit1d\n", + " lower, upper = limits.v1.limits\n", " mean = params['mean']\n", " std = params['std']\n", " # write your integral here\n", @@ -166,7 +168,7 @@ "metadata": {}, "outputs": [], "source": [ - "limits = zfit.Space(axes=0, limits=(zfit.Space.ANY_LOWER, zfit.Space.ANY_UPPER))\n", + "limits = zfit.Space(axes=0, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", "MyGauss.register_analytic_integral(func=gauss_integral_from_any_to_any, limits=limits)" ] }, diff --git a/components/62 - Multidim Custom PDF.ipynb b/components/62 - Multidim Custom PDF.ipynb index 4aa67ed..f3c76a0 100644 --- a/components/62 - Multidim Custom PDF.ipynb +++ b/components/62 - Multidim Custom PDF.ipynb @@ -23,6 +23,7 @@ "source": [ "import numpy as np\n", "import zfit\n", + "import zfit.z.numpy as znp\n", "from zfit import z" ] }, @@ -47,13 +48,18 @@ " _N_OBS = 3 # dimension, can be omitted\n", " _PARAMS = 
['xshift', 'yshift'] # the name of the parameters\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " x1, x2, x3 = x.unstack_x() # returns a list with the columns: do x1, x2, x3 = z.unstack_x(x) for 3D\n", - " xshift = self.params['xshift']\n", - " yshift = self.params['yshift']\n", - " x1 = x1 + xshift\n", - " x2 = x2 + yshift\n", - " return z.sqrt(z.square(x1) + z.square(x2) + z.square(x3)) # dummy calculations" + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " x0 = x[0]\n", + " x1 = x[1]\n", + " x2 = x[2]\n", + " # alternatively, we could use the following line to get the same result\n", + " # x0, x1, x2 = z.unstack_x(x) # returns a list with the columns: do x1, x2, x3 = z.unstack_x(x) for 3D\n", + " xshift = params['xshift']\n", + " yshift = params['yshift']\n", + " x0 = x0 + xshift\n", + " x1 = x1 + yshift\n", + " return znp.sqrt(znp.square(x0) + x1 ** 2 + znp.power(x2, 2)) # dummy calculations, all are equivalent" ] }, { @@ -102,17 +108,8 @@ "metadata": {}, "outputs": [], "source": [ - "probs = abs_vector.pdf(data)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "probs_np = zfit.run(probs)\n", - "print(probs_np[:20])" + "probs = abs_vector.pdf(data)\n", + "print(probs[:20])" ] }, { @@ -139,7 +136,7 @@ "outputs": [], "source": [ "def abs_vector_integral_from_any_to_any(limits, params, model):\n", - " lower, upper = limits.limits\n", + " lower, upper = limits.v1.limits\n", " # write your integral here\n", " return 42. # dummy integral, must be a scalar!" ] @@ -157,24 +154,11 @@ "metadata": {}, "outputs": [], "source": [ - "limits_to_integrate = (((zfit.Space.ANY_LOWER, zfit.Space.ANY_LOWER, zfit.Space.ANY_LOWER),),\n", - " ((zfit.Space.ANY_UPPER,zfit.Space.ANY_UPPER,zfit.Space.ANY_UPPER),))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we need the axis we will integrate over" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "axes_to_integrate = (0, 1, 2) # implies this is over all axes of the pdf" + "limit0 = zfit.Space(axes=0, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", + "limit1 = zfit.Space(axes=1, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", + "limit2 = zfit.Space(axes=2, lower=zfit.Space.ANY_LOWER, upper=zfit.Space.ANY_UPPER)\n", + "limits = limit0 * limit1 * limit2 # creates the 3D limits\n", + "print(limits)" ] }, { @@ -190,8 +174,6 @@ "metadata": {}, "outputs": [], "source": [ - "limits = zfit.Space(axes=axes_to_integrate, limits=limits_to_integrate)\n", - "\n", "AbsVectorShifted.register_analytic_integral(func=abs_vector_integral_from_any_to_any, limits=limits,\n", " priority=51,\n", " supports_norm_range=False, # False by default, but could be set to\n", diff --git a/components/80 - Toy Study.ipynb b/components/80 - Toy Study.ipynb index feb7c91..b8afe22 100644 --- a/components/80 - Toy Study.ipynb +++ b/components/80 - Toy Study.ipynb @@ -27,7 +27,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We gonna build a simple model, just a Gaussian. But, given the well defined workflow of zfit, `model` can be exchanged by _any_ complicated composition or custom model." + "We will build a simple model, just a Gaussian. But, given the well defined workflow of zfit, `model` can be exchanged by _any_ complicated composition or custom model." 
] }, { @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "obs = zfit.Space('x', (-5, 5))\n", + "obs = zfit.Space('x', -5, 5)\n", "\n", "sigma = zfit.Parameter('sigma', 1, 0.1, 10)\n", "mu = zfit.Parameter('mu', 0, -1, 1)\n", @@ -56,14 +56,14 @@ "metadata": {}, "outputs": [], "source": [ - "sampler = model.create_sampler(n=3000, fixed_params=True)" + "sampler = model.create_sampler(n=3000)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "So far, no sampling happened yet. But first, we build our whole chain, just using our sampler as `data`." + "This takes a while, as the first resampling happens now. Next, we build our whole chain, just using our sampler as `data`." ] }, { @@ -74,8 +74,8 @@ "source": [ "nll = zfit.loss.UnbinnedNLL(model, sampler)\n", "\n", - "from zfit.minimize import \\\n", - " DefaultToyStrategy # this stategy does not raise an error with NaNs but returns a non-converged `FitResult`\n", + "# this strategy does not raise an error with NaNs but returns a non-converged `FitResult`\n", + "from zfit.minimize import DefaultToyStrategy\n", "\n", "minimizer = zfit.minimize.Minuit(strategy=DefaultToyStrategy(), verbosity=0, tol=1e-3, use_minuit_grad=True)" ] diff --git a/components/90 - Serialization basics.ipynb index 18365e0..e83adbf 100644 --- a/components/90 - Serialization basics.ipynb +++ b/components/90 - Serialization basics.ipynb @@ -50,7 +50,7 @@ "source": [ "mu = zfit.Parameter(\"mu\", 1.2, -4, 5)\n", "sigma = zfit.Parameter(\"sigma\", 3, 0, 10)\n", - "obs = zfit.Space(\"obs1\", limits=(-10, 20))\n", + "obs = zfit.Space(\"obs1\", -10, 20)\n", "model = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)" ] }, @@ -91,7 +91,7 @@ }, "outputs": [], "source": [ - "x = np.linspace(*obs.limit1d, 1000)\n", + "x = np.linspace(*obs.v1.limits, 1000)\n", "mu.set_value(1.5)\n", "sigma.set_value(2)\n", "mplhep.histplot(data.to_binned(50), density=True, label=\"data\")\n", @@ -391,7 +391,7 @@ "\n", "with asdf.open(\"data.asdf\") as f:\n", " tree = f.tree\n", - " data = zfit.Data.from_asdf(f)\n" + " data = zfit.Data.from_asdf(f)" ] }, { @@ -504,6 +504,13 @@ "source": [ "zfit.hs3.loads(hs3dumped)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/guides/constraints_simultaneous_fit_discovery_splot.ipynb index f53672f..9e94b92 100644 --- a/guides/constraints_simultaneous_fit_discovery_splot.ipynb +++ b/guides/constraints_simultaneous_fit_discovery_splot.ipynb @@ -36,9 +36,9 @@ "import numpy as np\n", "import particle.literals as lp\n", "import tensorflow as tf\n", - "import zfit\n", + "import zfit.z.numpy\n", "\n", - "plt.rcParams['figure.figsize'] = (8,6)" + "plt.rcParams['figure.figsize'] = (8, 6)" ] }, { @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "mu_true = lp.B_plus.mass * u.MeV\n", + "mu_true = lp.B_plus.mass * u.MeV\n", "sigma_true = 50 * u.MeV\n", "\n", "# number of signal and background\n", "n_sig_rare = 120\n", "n_bkg_rare = 5000\n", @@ -64,7 +64,7 @@ "# create some data\n", "signal_np = np.random.normal(loc=mu_true, scale=sigma_true, size=n_sig_rare)\n", "bkg_np_raw = np.random.exponential(size=20000, scale=700)\n", - "bkg_np = bkg_np_raw[bkg_np_raw<1000][:n_bkg_rare] + 5000 # just cutting right, but zfit could also cut" + "bkg_np = bkg_np_raw[bkg_np_raw < 1000][:n_bkg_rare] + 5000 # just cutting right, but zfit could also cut" ] }, { @@ -74,7 +74,7 @@ "outputs": [], "source": [
"# Firstly, the observable and its range is defined\n", - "obs = zfit.Space('Bmass', (5000, 6000)) # for whole range" + "obs = zfit.Space('Bmass', 5000, 6000) # for whole range" ] }, { @@ -83,8 +83,12 @@ "metadata": {}, "outputs": [], "source": [ - "# load data into zfit\n", - "data = zfit.Data.from_numpy(obs=obs, array=np.concatenate([signal_np, bkg_np], axis=0))" + "# load data into zfit and let zfit concatenate the data\n", + "signal_data = zfit.Data(signal_np, obs=obs)\n", + "bkg_data = zfit.Data(bkg_np, obs=obs)\n", + "data = zfit.data.concat([signal_data, bkg_data])\n", + "# (we could also do it manually)\n", + "# data = zfit.Data(array=np.concatenate([signal_np, bkg_np], axis=0), obs=obs)" ] }, { @@ -96,20 +100,16 @@ "# Parameters are specified: (name (unique), initial, lower, upper) whereas lower, upper are optional\n", "mu = zfit.Parameter('mu', 5279, 5100, 5400)\n", "sigma = zfit.Parameter('sigma', 20, 1, 200)\n", - "signal = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)\n", + "sig_yield = zfit.Parameter('sig_yield', n_sig_rare + 30,\n", + " step_size=3) # step size: default is small, use appropriate\n", + "signal = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs, extended=sig_yield)\n", "\n", "lam = zfit.Parameter('lambda', -0.002, -0.1, -0.00001, step_size=0.001) # floating, also without limits\n", - "comb_bkg = zfit.pdf.Exponential(lam, obs=obs)\n", - "\n", - "sig_yield = zfit.Parameter('sig_yield', n_sig_rare + 30,\n", - " step_size=3) # step size: default is small, use appropriate\n", "bkg_yield = zfit.Parameter('bkg_yield', n_bkg_rare - 40, step_size=1)\n", - "# Create extended PDFs\n", - "extended_sig = signal.create_extended(sig_yield)\n", - "extended_bkg = comb_bkg.create_extended(bkg_yield)\n", + "comb_bkg = zfit.pdf.Exponential(lam, obs=obs, extended=bkg_yield)\n", "\n", "# The final model is the combination of the signal and backgrond PDF\n", - "model = zfit.pdf.SumPDF([extended_bkg, extended_sig])" + "model = zfit.pdf.SumPDF([comb_bkg, signal])" ] }, { @@ -118,7 +118,7 @@ "metadata": {}, "outputs": [], "source": [ - "constraint = zfit.constraint.GaussianConstraint(mu, observation=5275 * u.MeV, uncertainty=15 * u.MeV)" + "constraint = zfit.constraint.GaussianConstraint(mu, observation=5275 * u.MeV, sigma=15 * u.MeV)" ] }, { @@ -128,7 +128,7 @@ "outputs": [], "source": [ "nll = zfit.loss.ExtendedUnbinnedNLL(model, data, constraints=constraint)\n", - "minimizer = zfit.minimize.Minuit(gradient=True)\n", + "minimizer = zfit.minimize.Minuit(gradient=\"zfit\")\n", "result = minimizer.minimize(nll)\n", "result.hesse();" ] @@ -139,7 +139,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(result.params)" + "print(result)" ] }, { @@ -182,11 +182,13 @@ "# create some data\n", "signal_np_reso = np.random.normal(loc=mu_true, scale=sigma_true * 0.7, size=n_sig_reso)\n", "bkg_np_raw_reso = np.random.exponential(size=20000, scale=900)\n", - "bkg_np_reso = bkg_np_raw_reso[bkg_np_raw_reso<1000][:n_bkg_reso] + 5000\n", + "bkg_np_reso = bkg_np_raw_reso[bkg_np_raw_reso < 1000][:n_bkg_reso] + 5000\n", "\n", "# load data into zfit\n", - "obs_reso = zfit.Space('Bmass_reso', (5000, 6000))\n", - "data_reso = zfit.Data.from_numpy(obs=obs_reso, array=np.concatenate([signal_np_reso, bkg_np_reso], axis=0))" + "obs_reso = zfit.Space('Bmass_reso', 5000, 6000)\n", + "signal_data_reso = zfit.Data(signal_np_reso, obs=obs_reso)\n", + "bkg_data_reso = zfit.Data(bkg_np_reso, obs=obs_reso)\n", + "data_reso = zfit.data.concat([signal_data_reso, bkg_data_reso])" ] }, { @@ -235,7 +237,8 @@ "\n", "sigma_scaled 
"sigma_scaled = zfit.ComposedParameter('sigma scaled', # name\n", " sigma_scaled_fn, # function\n", - " params=[sigma, sigma_scaling] # the objects used inside the function\n", + " params=[sigma, sigma_scaling], # the objects used inside the function\n", + " unpack_params=True # alternatively, the function can take a single `params` dict\n", " )" ] }, { @@ -245,22 +248,19 @@ "metadata": {}, "outputs": [], "source": [ + "reso_sig_yield = zfit.Parameter('reso_sig_yield', n_sig_reso - 100, 0, n_sig_reso * 3,\n", + " step_size=1)\n", "signal_reso = zfit.pdf.Gauss(mu=mu, # the same as for the rare mode\n", " sigma=sigma_scaled,\n", - " obs=obs_reso\n", - " )\n", - "\n", - "lambda_reso = zfit.Parameter('lambda_reso', -0.002, -0.01, 0.0001) # floating\n", - "comb_bkg_reso_pdf = zfit.pdf.Exponential(lambda_reso, obs=obs_reso)\n", + " obs=obs_reso,\n", + " extended=reso_sig_yield)\n", "\n", - "reso_sig_yield = zfit.Parameter('reso_sig_yield', n_sig_reso - 100, 0, n_sig_reso * 3,\n", - " step_size=1) # step size: default is small, use appropriate\n", + "lambda_reso = zfit.Parameter('lambda_reso', -0.002, -0.01, 0.0001)\n", "reso_bkg_yield = zfit.Parameter('reso_bkg_yield', n_bkg_reso + 70, 0, 2e5, step_size=1)\n", + "comb_bkg_reso = zfit.pdf.Exponential(lambda_reso, obs=obs_reso, extended=reso_bkg_yield)\n", "\n", "\n", - "# Create the extended models\n", - "extended_sig_reso = signal_reso.create_extended(reso_sig_yield)\n", - "extended_bkg_reso = comb_bkg_reso_pdf.create_extended(reso_bkg_yield)\n", - "model_reso = zfit.pdf.SumPDF([extended_bkg_reso, extended_sig_reso])" + "model_reso = zfit.pdf.SumPDF([comb_bkg_reso, signal_reso])" ] }, { @@ -294,7 +294,7 @@ "metadata": {}, "outputs": [], "source": [ - "extended_sig_reso.get_yield()" + "signal_reso.get_yield()" ] }, { @@ -339,10 +339,11 @@ "metadata": {}, "outputs": [], "source": [ - "# Sets the values of the parameters to the result of the simultaneous fit\n", + "# Sets the values of the parameters to the result of the simultaneous fit\n", "# in case they were modified.\n", "zfit.param.set_values(nll_simultaneous.get_params(), result_simultaneous)\n", "\n", + "\n", "def plot_fit_projection(model, data, nbins=30, ax=None):\n", " # The function will be reused.\n", " if ax is None:\n", @@ -371,6 +372,7 @@ "\n", " return ax\n", "\n", + "\n", "fig, axs = plt.subplots(1, 2, figsize=(16, 6))\n", "\n", "for mod, dat, ax, nb in zip(nll_simultaneous.model, nll_simultaneous.data, axs, [30, 60]):\n", @@ -405,7 +407,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we would like to compute the significance of this observation or in other words the probabilty that this observation is the result of the statistical fluctuation. To do so we have to perform an hypothesis test where the null and alternative hypotheses are defined as:\n", + "Now we would like to compute the significance of this observation or, in other words, the probability that this observation is the result of a statistical fluctuation. To do so, we have to perform a hypothesis test where the null and alternative hypotheses are defined as:\n", "\n", "* $H_{0}$, the null or background only hypothesis, i.e.
$N_{sig} = 0$;\n", "* $H_{1}$, the alternative hypothesis, i.e $N_{sig} = \\hat{N}_{sig}$, where $\\hat{N}_{sig}$ is the fitted value of $N_{sig}$ printed above.\n", @@ -521,7 +523,7 @@ "source": [ "# Sets the values of the parameters to the result of the simultaneous fit\n", "zfit.param.set_values(nll_simultaneous.get_params(), result_simultaneous)\n", - "sigma_scaling.floating=False\n", + "sigma_scaling.floating = False\n", "\n", "# Creates a sampler that will draw events from the model\n", "sampler = model.create_sampler()\n", @@ -587,7 +589,7 @@ "from hepstats.hypotests import UpperLimit\n", "from hepstats.hypotests.parameters import POIarray\n", "\n", "# Background only hypothesis.\n", "bkg_only = POI(sig_yield, 0)\n", "# Range of Nsig values to scan.\n", "sig_yield_scan = POIarray(sig_yield, np.linspace(0, 70, 10))\n", @@ -630,12 +632,12 @@ "source": [ "# Signal distributions.\n", "nsig_sw = 20000\n", - "np_sig_m_sw = signal_reso.sample(nsig_sw).numpy().reshape(-1,)\n", + "np_sig_m_sw = signal_reso.sample(nsig_sw)[\"Bmass_reso\"]\n", "np_sig_t_sw = np.random.exponential(size=nsig_sw, scale=1)\n", "\n", "# Background distributions.\n", "nbkg_sw = 150000\n", - "np_bkg_m_sw = comb_bkg_reso_pdf.sample(nbkg_sw).numpy().reshape(-1,)\n", + "np_bkg_m_sw = comb_bkg_reso.sample(nbkg_sw)[\"Bmass_reso\"]\n", "np_bkg_t_sw = np.random.normal(size=nbkg_sw, loc=2.0, scale=2.5)\n", "\n", "# Lifetime cut.\n", @@ -673,11 +675,11 @@ "outputs": [], "source": [ "# Builds the loss.\n", - "data_sw = zfit.Data.from_numpy(obs=obs_reso, array=np_m_sw)\n", + "data_sw = zfit.Data(np_m_sw, obs=obs_reso)\n", "nll_sw = zfit.loss.ExtendedUnbinnedNLL(model_reso, data_sw)\n", "\n", "# This parameter was useful in the simultaneous fit but not anymore so we fix it.\n", - "sigma_scaling.floating=False\n", + "sigma_scaling.floating = False\n", "\n", "# Minimizes the loss.\n", "result_sw = minimizer.minimize(nll_sw)\n", diff --git a/guides/custom_models.ipynb index ea9adfc..cb33ff3 100644 --- a/guides/custom_models.ipynb +++ b/guides/custom_models.ipynb @@ -18,7 +18,7 @@ "\n", "Following the philosophy of zfit, there are different levels of customization. For the most simple use-case, all we need to do is to provide a function describing the shape and the name of the parameters. This can be done by overriding `_unnormalized_pdf`.\n", "\n", - "To implement a mathematical function in zfit, TensorFlow or z should be used. The latter is a subset of TensorFlow and improves it in some aspects, such as automatic dtype casting, and therefore preferred to use.\n", + "To implement a mathematical function in zfit, `znp` (a numpy-like interface) or `z` (the backend) should be used. This ensures that the function can be traced and that the gradient can be calculated. If a function is not available there, `tf` can also be used.\n", "(_There are other ways to use arbitrary Python functions, they will be discussed later on_)."
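To illustrate the backend guidance above with a runnable fragment (this example is not part of the notebook; `shape` is a made-up function name):

    import tensorflow as tf
    import zfit.z.numpy as znp
    from zfit import z


    @z.function  # traced and differentiable, analogous to tf.function
    def shape(x):
        y = znp.exp(-znp.square(x))  # znp mirrors the numpy API
        return y * tf.math.erf(x)    # plain tf fills in what znp does not cover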
] }, @@ -36,6 +36,7 @@ "import numpy as np\n", "import tensorflow as tf\n", "import zfit\n", + "import zfit.z.numpy as znp\n", "from zfit import z" ] }, @@ -68,10 +69,11 @@ " \"\"\"Second order polynomial `a + b * x + c * x^2`\"\"\"\n", " _PARAMS = ['b', 'c'] # specify which parameters to take\n", "\n", - " def _unnormalized_pdf(self, x): # implement function, unnormalized\n", - " data = z.unstack_x(x)\n", - " b = self.params['b']\n", - " c = self.params['c']\n", + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params): # implement function, unnormalized\n", + " data = x[0] # axis 0\n", + " b = params['b']\n", + " c = params['c']\n", "\n", " return 1 + b * data + c * data ** 2" ] @@ -99,7 +101,7 @@ }, "outputs": [], "source": [ - "obs = zfit.Space(\"obs1\", limits=(-4, 4))\n", + "obs = zfit.Space(\"obs1\", -4, 4)\n", "\n", "b = zfit.Parameter('b', 0.2, 0.1, 10)\n", "custom_poly = SecondOrderPoly(obs=obs, b=b, c=1.4)" @@ -156,17 +158,18 @@ "outputs": [], "source": [ "# define the integral function\n", + "\n", + "\n", "def cdf_poly(limit, b, c):\n", " return limit + 0.5 * b * limit ** 2 + 1 / 3 * c * limit ** 3\n", "\n", + "\n", "def integral_func(limits, norm_range, params, model):\n", "\n", " b = params['b']\n", " c = params['c']\n", "\n", - " lower, upper = limits.limit1d\n", - " lower = z.convert_to_tensor(lower) # the limits are now 1-D, for axis 1\n", - " upper = z.convert_to_tensor(upper)\n", + " lower, upper = limits.v1.limits\n", "\n", " # calculate the integral\n", " integral = cdf_poly(upper, b, c) - cdf_poly(lower, b, c)\n", @@ -265,17 +268,20 @@ " _PARAMS = ['FL', 'S3', 'S4', 'S5', 'AFB', 'S7', 'S8', 'S9']\n", " _N_OBS = 3\n", "\n", - " def _unnormalized_pdf(self, x):\n", - " FL = self.params['FL']\n", - " S3 = self.params['S3']\n", - " S4 = self.params['S4']\n", - " S5 = self.params['S5']\n", - " AFB = self.params['AFB']\n", - " S7 = self.params['S7']\n", - " S8 = self.params['S8']\n", - " S9 = self.params['S9']\n", - "\n", - " costheta_l, costheta_k, phi = z.unstack_x(x)\n", + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", + " FL = params['FL']\n", + " S3 = params['S3']\n", + " S4 = params['S4']\n", + " S5 = params['S5']\n", + " AFB = params['AFB']\n", + " S7 = params['S7']\n", + " S8 = params['S8']\n", + " S9 = params['S9']\n", + "\n", + " costheta_l = x[0]\n", + " costheta_k = x[1]\n", + " phi = x[2]\n", "\n", " sintheta_k = tf.sqrt(1.0 - costheta_k * costheta_k)\n", " sintheta_l = tf.sqrt(1.0 - costheta_l * costheta_l)\n", @@ -291,13 +297,13 @@ " FL * costheta_k * costheta_k +\n", " (1.0 / 4.0) * (1.0 - FL) * sintheta_2k * cos2theta_l +\n", " -1.0 * FL * costheta_k * costheta_k * cos2theta_l +\n", - " S3 * sintheta_2k * sintheta_2l * tf.cos(2.0 * phi) +\n", - " S4 * sin2theta_k * sin2theta_l * tf.cos(phi) +\n", - " S5 * sin2theta_k * sintheta_l * tf.cos(phi) +\n", + " S3 * sintheta_2k * sintheta_2l * znp.cos(2.0 * phi) +\n", + " S4 * sin2theta_k * sin2theta_l * znp.cos(phi) +\n", + " S5 * sin2theta_k * sintheta_l * znp.cos(phi) +\n", " (4.0 / 3.0) * AFB * sintheta_2k * costheta_l +\n", - " S7 * sin2theta_k * sintheta_l * tf.sin(phi) +\n", - " S8 * sin2theta_k * sin2theta_l * tf.sin(phi) +\n", - " S9 * sintheta_2k * sintheta_2l * tf.sin(2.0 * phi))\n", + " S7 * sin2theta_k * sintheta_l * znp.sin(phi) +\n", + " S8 * sin2theta_k * sin2theta_l * znp.sin(phi) +\n", + " S9 * sintheta_2k * sintheta_2l * znp.sin(2.0 * phi))\n", "\n", " return pdf" ] @@ -664,9 +670,11 @@ "source": [ "x_tf = z.constant(42.)\n", "\n", + "\n", "def sqrt(x):\n", " 
return np.sqrt(x)\n", "\n", + "\n", "y = z.py_function(func=sqrt, inp=[x_tf], Tout=tf.float64)" ] }, @@ -758,12 +766,13 @@ "class NumpyGauss(zfit.pdf.ZPDF):\n", " _PARAMS = ['mu', 'sigma']\n", "\n", - " def _unnormalized_pdf(self, x):\n", + " @zfit.supports()\n", + " def _unnormalized_pdf(self, x, params):\n", " zfit.run.assert_executing_eagerly() # make sure we're eager\n", - " data = z.unstack_x(x)\n", - " mu = self.params['mu']\n", - " sigma = self.params['sigma']\n", - " return z.convert_to_tensor(np.exp( - 0.5 * (data - mu) ** 2 / sigma ** 2))" + " data = x[0]\n", + " mu = params['mu']\n", + " sigma = params['sigma']\n", + " return np.exp(-0.5 * (data - mu) ** 2 / sigma ** 2) # note that we use numpy here" ] }, { @@ -774,7 +783,7 @@ } }, "source": [ - "Make sure to return a Tensor again, otherwise there will be an error." + "This can be tested and compared." ] }, { diff --git a/introduction/Introduction.ipynb index e4e7a4b..bcd1757 100644 --- a/introduction/Introduction.ipynb +++ b/introduction/Introduction.ipynb @@ -49,7 +49,7 @@ "\n", "zfit knows unbinned and binned datasets.\n", "\n", - "The unbinned `Data` can load data from various sources, most notably from Numpy, Pandas DataFrame, TensorFlow Tensor and ROOT (using uproot). It is also possible, for convenience, to convert it directly `to_pandas`. The constructors are named `from_numpy`, `from_root` etc." + "The unbinned `Data` can load data from various sources, such as Numpy, Pandas DataFrame, TensorFlow Tensor and ROOT (using uproot). It is also possible, for convenience, to convert it directly via `to_pandas`. The constructors are named `from_numpy`, `from_root` etc." ] }, { @@ -71,7 +71,9 @@ }, "outputs": [], "source": [ - "obs = zfit.Space('obs1', (-5, 10))" + "obs = zfit.Space('obs1', -5, 10)\n", + "# or more explicitly\n", + "obs = zfit.Space(obs='obs1', lower=-5, upper=10)" ] }, { @@ -79,7 +81,7 @@ "metadata": {}, "source": [ "This `Space` has limits and offers the following functionality:\n", - "- area(): calculate the area (e.g. distance between upper and lower)\n", + "- volume(): calculate the volume (e.g. in 1D the distance between upper and lower)\n", "- inside(): return a boolean Tensor corresponding to whether the value is _inside_ the `Space`\n", "- filter(): filter the input values to only return the one inside" ] }, { @@ -97,7 +99,7 @@ "size_normal = 10_000\n", "data_normal_np = np.random.normal(size=size_normal, scale=2)\n", "\n", - "data_normal = zfit.Data.from_numpy(obs=obs, array=data_normal_np)" + "data_normal = zfit.Data(data_normal_np, obs=obs)" ] }, { @@ -106,11 +108,15 @@ "source": [ "The main functionality is\n", "- nevents: attribute that returns the number of events in the object\n", - "- data_range: a `Space` that defines the limits of the data; if outside, the data will be cut\n", - "- n_obs: defines the number of dimensions in the dataset\n", - "- with_obs: returns a subset of the dataset with only the given obs\n", + "- space: a `Space` that defines the limits of the data; if outside, the data will be cut (!)\n", + "- n_obs: the number of dimensions in the dataset\n", + "- with_obs: returns a dataset, possibly a subset of the original, with only the given obs\n", + "- `with_weights`: returns a dataset with the given weights\n", "- weights: event weights\n", "\n", + "Indexing with `data[obs]` returns a `Data` object with only the given observables, behaving similarly to a Pandas DataFrame.\n", + "A string, i.e. a single observable, returns an array; using a list of strings returns a `Data` object with the given observables.\n",
"\n", "Furthermore, `value` returns a Tensor with shape `(nevents, n_obs)`." ] }, { @@ -377,9 +383,11 @@ "source": [ "### Tensors\n", "\n", - "As we see, many zfit functions return Tensors. This is however no magical thing! If we're outside of models, then we can always safely convert them to a numpy array by calling `zfit.run(...)` on it (or any structure containing potentially multiple Tensors) or simply `np.array`. However, this may not even be required often! They can be added just like numpy arrays and interact well with Python and Numpy:\n", + "As we see, many zfit functions return Tensors. Usually, these are array-like objects that resemble numpy arrays closely enough that they can be used directly in place of actual numpy arrays.\n", + "\n", + "If explicitly needed, we can always safely convert them to a numpy array by calling `np.asarray(tensor)`.\n", "\n", - "[**Extended tutorial on TensorFlow**](https://zfit-tutorials.readthedocs.io/en/master/tutorials/TensorFlow/HPC_with_TensorFlow.html)" + "More information about the backend can be found in the [**Extended tutorial on TensorFlow**](https://zfit-tutorials.readthedocs.io/en/master/tutorials/TensorFlow/HPC_with_TensorFlow.html)" ] }, { @@ -392,7 +400,7 @@ }, "outputs": [], "source": [ - "np.sqrt(integral)" + "np.sqrt(integral) # works out of the box" ] }, { @@ -606,7 +614,7 @@ }, "outputs": [], "source": [ - "mass_obs = zfit.Space('mass', (0, 1000))" + "mass_obs = zfit.Space('mass', 0, 1000)" ] }, { @@ -834,7 +842,10 @@ "outputs": [], "source": [ "yield_model = zfit.Parameter('yield_model', 10000, 0, 20000, step_size=10)\n", - "model_ext = model.create_extended(yield_model)" + "# model_ext = model.create_extended(yield_model) # using an existing model\n", + "\n", + "# when creating a new model\n", + "model_ext = zfit.pdf.SumPDF([signal, comb_bkg, part_reco], [sig_frac, comb_bkg_frac], extended=yield_model)" ] }, { diff --git a/introduction/Introduction_long.ipynb index 92c73fd..c7c665e 100644 --- a/introduction/Introduction_long.ipynb +++ b/introduction/Introduction_long.ipynb @@ -104,7 +104,7 @@ }, "outputs": [], "source": [ - "obs = zfit.Space('obs1', (-5, 10))" + "obs = zfit.Space('obs1', -5, 10)" ] }, { diff --git a/introduction/Quickstart.ipynb index 347104e..eb97684 100644 --- a/introduction/Quickstart.ipynb +++ b/introduction/Quickstart.ipynb @@ -80,7 +80,7 @@ }, "outputs": [], "source": [ - "obs = zfit.Space('x', limits=(-10, 10))" + "obs = zfit.Space('x', -10, 10)" ] }, { @@ -186,14 +186,7 @@ } }, "source": [ - "This pdf contains several useful methods, such as calculating a probability, calculating its integral, sampling etc.\n", - "\n", - "**Note**: Several objects that are returned from methods, like `integrate`, return `tf.Tensor`, which are wrapped Numpy arrays.\n", - "They can directly be used as such or explicitly converted to by calling:\n", - "\n", - "```python\n", - "zfit.run(TensorFlow_object)\n", - "```" + "This pdf contains several useful methods, such as calculating a probability, calculating its integral, sampling etc."
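Since pdf outputs are now eager, numpy-like tensors (see the Introduction changes above), the Quickstart cells below can drop `zfit.run` entirely; a short sketch with illustrative values:

    import numpy as np
    import zfit

    obs = zfit.Space('x', -10, 10)
    mu = zfit.Parameter('mu', 1.0)
    sigma = zfit.Parameter('sigma', 1.0)
    gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)

    probs = gauss.pdf([-1, 0, 1])  # array-like tensor, usable directly
    arr = np.asarray(probs)        # explicit conversion if ever needed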
] }, { @@ -209,9 +202,7 @@ "# Let's get some probabilities.\n", "consts = [-1, 0, 1]\n", "probs = gauss.pdf(consts)\n", - "# And now execute the tensorflow graph\n", - "result = zfit.run(probs)\n", - "print(f\"x values: {consts}\\nresult: {result}\")" + "print(f\"x values: {consts}\\nresult: {probs}\")" ] }, { @@ -300,8 +291,8 @@ "range_ = (-5,5)\n", "_ = plt.hist(data_np, bins=n_bins, range=range_)\n", "x = np.linspace(*range_, num=1000)\n", - "pdf = zfit.run(gauss.pdf(x))\n", - "_ = plt.plot(x, data_np.shape[0] / n_bins * (range_[1] - range_[0]) * pdf)" + "probs = gauss.pdf(x)\n", + "_ = plt.plot(x, data_np.shape[0] / n_bins * (range_[1] - range_[0]) * probs)" ] }, {