import numpy as np
import scipy.stats as st
from scipy.optimize import minimize

B = st.bernoulli(0.75)
np.random.seed(291)
x = B.rvs(size = 101)

def ll_bernoulli(theta, data):
    return -np.sum(st.bernoulli(theta).logpmf(data))
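
# Optional spot check (an addition, not part of the original run): the negative
# log-likelihood should be smallest near the true p = 0.75 used to simulate x,
# and noticeably larger for poor guesses.
for theta in (0.1, 0.5, 0.75, 0.9):
    print(theta, ll_bernoulli(theta, x))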

rng = np.random.default_rng()
init = rng.random()
print(init)
minimize(ll_bernoulli, init, args = (x,), bounds = [(1e-5, 1 - 1e-5)])
0.6442960015361123
  message: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL
  success: True
   status: 0
      fun: 52.93785788218957
        x: [ 7.822e-01]
      nit: 5
      jac: [ 2.842e-06]
     nfev: 14
     njev: 7
 hess_inv: <1x1 LbfgsInvHessProduct with dtype=float64>
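
# Sanity check (an addition, not part of the original run): the MLE of the
# Bernoulli success probability is the sample mean, so x.mean() should agree
# with the fitted value of about 0.782 reported above.
print(x.mean())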

def ll_exponential(theta, data):
    return -np.sum(st.expon(scale = theta).logpdf(data))

E = st.expon(scale = 5.5)
x = E.rvs(size = 1001)
init = rng.random()
minimize(ll_exponential, init, args = (x,), bounds = [(1e-5, np.inf)])
  message: CONVERGENCE: REL_REDUCTION_OF_F_<=_FACTR*EPSMCH
  success: True
   status: 0
      fun: 2708.037801958211
        x: [ 5.503e+00]
      nit: 13
      jac: [ 4.547e-05]
     nfev: 30
     njev: 15
 hess_inv: <1x1 LbfgsInvHessProduct with dtype=float64>
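
# Sanity check (an addition, not part of the original run): the MLE of the
# exponential scale parameter is the sample mean, so x.mean() should agree with
# the fitted value of about 5.503 above (the true scale used to simulate was 5.5).
print(x.mean())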