
misc

kernels

Alpha(x, sigma, reverse=False)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | | required |
| sigma | float | | required |
| reverse | bool | Reverse the direction of the kernel. | False |

Returns:

| Type | Description |
| --- | --- |
| ndarray | (x >= 0) * 2. * (x / sigma ** 2) * np.exp(-x * np.sqrt(2.) / sigma) |
Source code in indl/misc/kernels.py
def Alpha(x: np.ndarray, sigma: float, reverse: bool = False) -> np.ndarray:
    """
    Alpha-function kernel with unit area and width parameter `sigma`.

    Args:
        x: Points at which to evaluate the kernel.
        sigma: Kernel width parameter.
        reverse: Reverse the direction of the kernel.

    Returns:
        ```math
        (x >= 0) * 2. * (x / sigma ** 2) * np.exp(-x * np.sqrt(2.) / sigma)
        ```
    """
    if reverse:
        return (x <= 0) * -2. * (x / sigma ** 2) * np.exp(x * np.sqrt(2.) / sigma)
    else:
        return (x >= 0) * 2. * (x / sigma ** 2) * np.exp(-x * np.sqrt(2.) / sigma)
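
A minimal usage sketch (assuming numpy is imported as np and Alpha is in scope; the grid is arbitrary), checking that the kernel has unit area and that reverse=True mirrors its support:

```python
import numpy as np

t = np.linspace(-5., 5., 10001)  # symmetric evaluation grid
k_fwd = Alpha(t, 0.5)
k_rev = Alpha(t, 0.5, reverse=True)

print(np.trapz(k_fwd, t))               # ~1.0: the kernel integrates to one
print(np.allclose(k_rev, k_fwd[::-1]))  # True: reverse=True mirrors the kernel
```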

Boxcar(x, sigma, reverse=None)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | np.ndarray | | required |
| sigma | float | | required |
| reverse | Optional[bool] | unused | None |

Returns:

| Type | Description |
| --- | --- |
| ndarray | (0.5 / (np.sqrt(3.0) * sigma)) * (np.abs(x) < (np.sqrt(3.0) * sigma)) |
Source code in indl/misc/kernels.py
def Boxcar(x: np.ndarray, sigma: float, reverse: Optional[bool] = None) -> np.ndarray:
    """
    Boxcar (uniform) kernel with unit area and standard deviation `sigma`.

    Args:
        x: Points at which to evaluate the kernel.
        sigma: Kernel standard deviation.
        reverse: unused

    Returns:
        ```math
        (0.5 / (np.sqrt(3.0) * sigma)) * (np.abs(x) < (np.sqrt(3.0) * sigma))
        ```
    """
    return (0.5 / (np.sqrt(3.0) * sigma)) * (np.abs(x) < (np.sqrt(3.0) * sigma))

Cauchy(x, w, reverse=None)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | np.ndarray | | required |
| w | float | | required |
| reverse | Optional[bool] | unused | None |

Returns:

| Type | Description |
| --- | --- |
| ndarray | 1 / (np.pi * w * (1 + (x / w)**2)) |
Source code in indl/misc/kernels.py
def Cauchy(x: np.ndarray, w: float, reverse: Optional[bool] = None) -> np.ndarray:
    """
    Cauchy (Lorentzian) kernel with unit area and scale parameter `w`.

    Args:
        x: Points at which to evaluate the kernel.
        w: Kernel scale parameter.
        reverse: unused

    Returns:
        ```math
        1 / (np.pi * w * (1 + (x / w)**2))
        ```
    """
    return 1 / (np.pi * w * (1 + (x / w)**2))

Exponential(x, sigma, reverse=False)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | | required |
| sigma | float | | required |
| reverse | bool | Reverse the direction of the kernel. | False |

Returns:

| Type | Description |
| --- | --- |
| ndarray | (x >= 0) * (1. / sigma) * np.exp(-x / sigma) |
Source code in indl/misc/kernels.py
def Exponential(x: np.ndarray, sigma: float, reverse: bool = False) -> np.ndarray:
    """
    One-sided exponential kernel with unit area and time constant `sigma`.

    Args:
        x: Points at which to evaluate the kernel.
        sigma: Kernel time constant.
        reverse: Reverse the direction of the kernel.

    Returns:
        ```math
        (x >= 0) * (1. / sigma) * np.exp(-x / sigma)
        ```
    """
    if reverse:
        return (x <= 0) * (1. / sigma) * np.exp(x / sigma)
    else:
        return (x >= 0) * (1. / sigma) * np.exp(-x / sigma)

Gauss(x, sigma, reverse=None)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | | required |
| sigma | float | | required |
| reverse | Optional[bool] | unused | None |

Returns:

| Type | Description |
| --- | --- |
| ndarray | (1.0 / (np.sqrt(2.0 * np.pi) * sigma)) * np.exp(-0.5 * (x / sigma) ** 2) |
Source code in indl/misc/kernels.py
def Gauss(x: np.ndarray, sigma: float, reverse: Optional[bool] = None) -> np.ndarray:
    """
    Gaussian kernel with unit area and standard deviation `sigma`.

    Args:
        x: Points at which to evaluate the kernel.
        sigma: Kernel standard deviation.
        reverse: unused

    Returns:
        ```math
        (1.0 / (np.sqrt(2.0 * np.pi) * sigma)) * np.exp(-0.5 * (x / sigma) ** 2)
        ```
    """
    return (1.0 / (np.sqrt(2.0 * np.pi) * sigma)) * np.exp(-0.5 * (x / sigma) ** 2)

Laplace(x, w, reverse=None)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | np.ndarray | | required |
| w | float | | required |
| reverse | Optional[bool] | unused | None |

Returns:

| Type | Description |
| --- | --- |
| ndarray | 1 / (2**0.5 * w) * np.exp(-(2**0.5) * np.abs(x) / w) |
Source code in indl/misc/kernels.py
def Laplace(x: np.ndarray, w: float, reverse: Optional[bool] = None) -> np.ndarray:
    """
    Laplace kernel with unit area and standard deviation `w`.

    Args:
        x: Points at which to evaluate the kernel.
        w: Kernel standard deviation.
        reverse: unused

    Returns:
        ```math
        1 / (2**0.5 * w) * np.exp(-(2**0.5) * np.abs(x) / w)
        ```
    """
    # Laplace density with standard deviation w: decay rate sqrt(2)/w applied to |x|
    return 1 / (2**0.5 * w) * np.exp(-(2**0.5) * np.abs(x) / w)
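
As a sanity check on the symmetric kernels above, a small sketch (grid and widths chosen for illustration) verifying that each integrates to approximately one:

```python
import numpy as np

t = np.linspace(-50., 50., 200001)  # wide, fine grid for the heavy-tailed Cauchy
for kern in (Boxcar, Gauss, Laplace, Cauchy):
    mass = np.trapz(kern(t, 1.0), t)
    print(f"{kern.__name__}: area ~ {mass:.3f}")
# Boxcar, Gauss, and Laplace come out ~1.000; Cauchy falls slightly short
# because its tails decay too slowly to be captured on a finite grid.
```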

fftkernel(x, w)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | Signal to smooth. | required |
| w | float | Gaussian kernel bandwidth (standard deviation) in samples. | required |
Source code in indl/misc/kernels.py
def fftkernel(x: np.ndarray, w: float) -> np.ndarray:
    """
    Smooth `x` with a Gaussian kernel via multiplication in the frequency domain.

    Args:
        x: Signal to smooth.
        w: Kernel bandwidth (standard deviation) in units of samples.

    Returns:
        The smoothed signal, same length as `x`.
    """
    # forward padded transform
    L = x.size
    Lmax = L + 3 * w
    n = int(2 ** np.ceil(np.log2(Lmax)))
    X = np.fft.fft(x, n)

    # generate kernel domain
    f = np.linspace(0, n - 1, n) / n
    f = np.concatenate((-f[0: int(n / 2 + 1)],
                        f[1: int(n / 2 - 1 + 1)][::-1]))

    # evaluate kernel (the Fourier transform of a Gaussian is a Gaussian)
    K = np.exp(-0.5 * (w * 2 * np.pi * f) ** 2)

    # convolve and transform back from frequency domain
    y = np.real(np.fft.ifft(X * K, n))
    y = y[0:L]

    return y
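
For orientation, a sketch (test signal and bandwidth are arbitrary) showing that fftkernel agrees with direct convolution against a sampled Gaussian:

```python
import numpy as np

rng = np.random.default_rng(0)
sig = rng.random(512)   # arbitrary test signal
w = 4.0                 # bandwidth in samples

smoothed = fftkernel(sig, w)

# direct (truncated) convolution with the same Gaussian for comparison
taps = np.arange(-int(6 * w), int(6 * w) + 1)
direct = np.convolve(sig, Gauss(taps.astype(float), w), mode="same")

print(np.max(np.abs(smoothed - direct)))  # should be near zero
```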

fftkernelWin(x, w, WinFunc)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | data | required |
| w | float | kernel parameter | required |
| WinFunc | str | 'Boxcar', 'Laplace', 'Cauchy', or 'Gauss' (any other value falls back to 'Gauss') | required |
Source code in indl/misc/kernels.py
def fftkernelWin(x: np.ndarray, w: float, WinFunc: str) -> np.ndarray:
    """
    Smooth `x` with a selectable window function via the frequency domain.

    Args:
        x: data
        w: kernel parameter
        WinFunc: 'Boxcar', 'Laplace', 'Cauchy', or 'Gauss' (any other value
            falls back to 'Gauss')

    Returns:
        The smoothed signal, same length as `x`.
    """
    # forward padded transform
    L = x.size
    Lmax = L + 3 * w
    n = int(2 ** np.ceil(np.log2(Lmax)))
    X = np.fft.fft(x, n)

    # generate kernel domain
    f = np.linspace(0, n - 1, n) / n
    f = np.concatenate((-f[0: int(n / 2 + 1)],
                        f[1: int(n / 2 - 1 + 1)][::-1]))
    t = 2 * np.pi * f

    # determine window function - evaluate kernel in the frequency domain
    if WinFunc == 'Boxcar':
        a = 12**0.5 * w
        with np.errstate(divide='ignore', invalid='ignore'):
            K = 2 * np.sin(a * t / 2) / (a * t)
        K[0] = 1
    elif WinFunc == 'Laplace':
        K = 1 / (1 + (w * 2 * np.pi * f)**2 / 2)
    elif WinFunc == 'Cauchy':
        K = np.exp(-w * np.abs(2 * np.pi * f))
    else:  # WinFunc == 'Gauss'
        K = np.exp(-0.5 * (w * 2 * np.pi * f)**2)

    # convolve and transform back from frequency domain
    y = np.real(np.fft.ifft(X * K, n))
    y = y[0:L]

    return y
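
A brief sketch (signal and parameter chosen arbitrarily) smoothing the same data with each supported window; every window has unit DC gain, so the total mass is approximately preserved:

```python
import numpy as np

rng = np.random.default_rng(1)
sig = rng.random(256)

for win in ('Boxcar', 'Laplace', 'Cauchy', 'Gauss'):
    y = fftkernelWin(sig, 4.0, win)
    print(win, np.sum(y) / np.sum(sig))  # ~1; small losses come from edge truncation
```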

ilogexp(x)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | | required |
Source code in indl/misc/kernels.py
def ilogexp(x: np.ndarray) -> np.ndarray:
    """
    Inverse of `logexp` (inverse softplus): log(exp(x) - 1), switching to the
    identity for large x to avoid overflow.

    Args:
        x: Input value(s).

    Returns:
        log(exp(x) - 1) where x < 100, x elsewhere.
    """
    x = np.asarray(x, dtype=float)  # accept scalars as well as arrays
    y = np.zeros(x.shape)
    y[x < 1e2] = np.log(np.exp(x[x < 1e2]) - 1)
    y[x >= 1e2] = x[x >= 1e2]
    return y

logexp(x)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | | required |
Source code in indl/misc/kernels.py
def logexp(x: np.ndarray) -> np.ndarray:
    """
    Numerically stable softplus: log(1 + exp(x)), switching to the identity
    for large x to avoid overflow.

    Args:
        x: Input value(s).

    Returns:
        log(1 + exp(x)) where x < 100, x elsewhere.
    """
    x = np.asarray(x, dtype=float)  # accept scalars as well as arrays
    y = np.zeros(x.shape)
    y[x < 1e2] = np.log(1 + np.exp(x[x < 1e2]))
    y[x >= 1e2] = x[x >= 1e2]
    return y
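
A quick sketch (values picked arbitrarily) confirming that logexp and ilogexp are mutual inverses and stay finite where a naive softplus would overflow:

```python
import numpy as np

v = np.array([1e-3, 1.0, 50.0, 500.0])
print(logexp(ilogexp(v)))          # recovers v; 500.0 passes through the linear branch
print(logexp(np.array([1000.0])))  # 1000.0, not inf: large inputs bypass exp()
```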

sshist(x, N=range(2, 501), SN=30)

Returns the optimal number of bins in a histogram used for density estimation.

The optimization principle is to minimize the expected L2 loss function between the histogram and an unknown underlying density function. The only assumption made is that samples are drawn from the density independently of each other.

The optimal bin width D* is obtained as a minimizer of the formula (2K - V) / D^2, where K and V are the mean and variance of sample counts across bins with width D. The optimal number of bins is given as (max(x) - min(x)) / D.

Parameters

x : array_like
    One-dimensional data to fit the histogram to.
N : array_like, optional
    Candidate numbers of histogram bins to evaluate. Default value = range(2, 501).
SN : int, optional
    Number of shift positions used for shift-averaging. Default value = 30.

Returns

optN : int
    Optimal number of bins to represent the data in x.
optD : float
    Optimal bin width.
edges : array_like
    Bin edges spanning the data range at the optimal bin count.
C : array_like
    Cost function C[i] of evaluating the histogram fit with N[i] bins.
N : range
    The bin counts that were evaluated.

See Also

sskernel, ssvkernel

References

.. [1] H. Shimazaki and S. Shinomoto, "A method for selecting the bin size of a time histogram," in Neural Computation 19(6), 1503-1527, 2007 http://dx.doi.org/10.1162/neco.2007.19.6.1503

Source code in indl/misc/kernels.py
def sshist(x: np.ndarray, N=range(2, 501), SN: int = 30
           ) -> Tuple[int, float, np.ndarray, np.ndarray, range]:
    """
    Returns the optimal number of bins in a histogram used for density
    estimation.

    The optimization principle is to minimize the expected L2 loss function
    between the histogram and an unknown underlying density function.
    The only assumption made is that samples are drawn from the density
    independently of each other.

    The optimal bin width D* is obtained as a minimizer of the formula,
    (2K - V) / D^2,
    where K and V are the mean and variance of sample counts across bins with
    width D. The optimal number of bins is given as (max(x) - min(x)) / D.

    Parameters
    ----------
    x : array_like
        One-dimensional data to fit the histogram to.
    N : array_like, optional
        Candidate numbers of histogram bins to evaluate.
        Default value = range(2, 501).
    SN : int, optional
        Number of shift positions used for shift-averaging.

    Returns
    -------
    optN : int
        Optimal number of bins to represent the data in x.
    optD : float
        Optimal bin width.
    edges : array_like
        Bin edges spanning the data range at the optimal bin count.
    C : array_like
        Cost function C[i] of evaluating the histogram fit with N[i] bins.
    N : range
        The bin counts that were evaluated.

    See Also
    --------
    sskernel, ssvkernel

    References
    ----------
    .. [1] H. Shimazaki and S. Shinomoto, "A method for selecting the bin size
           of a time histogram," in Neural Computation 19(6), 1503-1527, 2007
           http://dx.doi.org/10.1162/neco.2007.19.6.1503
    """

    # determine range of input 'x'
    x_min = np.min(x)
    x_max = np.max(x)

    # get smallest difference 'dx' between all pairwise samples
    buf = np.abs(np.diff(np.sort(x)))
    dx = min(buf[buf > 0])

    # setup bins to evaluate
    N_MIN = 2
    N_MAX = int(min(np.floor((x_max - x_min) / (2 * dx)), max(N)))
    N = range(N_MIN, N_MAX + 1)
    D = (x_max - x_min) / np.asarray(N)

    # compute cost function over each possible number of bins
    Cs = np.zeros((len(N), SN))
    for i in range(len(N)):  # loop over number of bins
        shift = np.linspace(0, D[i], SN)
        for p, sh in enumerate(shift):  # loop over shift window positions

            # define bin edges
            edges = np.linspace(x_min + sh - D[i] / 2,
                                x_max + sh - D[i] / 2, N[i] + 1)

            # count number of events in these bins
            counts = np.histogram(x, edges)[0]

            # get mean and variance of event counts
            k = counts.mean()
            v = np.sum((counts - k) ** 2) / N[i]

            Cs[i, p] = (2 * k - v) / D[i] ** 2

    # average over shift window
    C = Cs.mean(axis=1)

    # get bin count that minimizes cost C
    idx = np.argmin(C)
    optN = N[idx]
    optD = D[idx]
    edges = np.linspace(x_min, x_max, optN + 1)  # optN bins need optN + 1 edges

    return optN, optD, edges, C, N
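
A minimal usage sketch (synthetic bimodal data; names are illustrative), unpacking the five return values described above:

```python
import numpy as np

rng = np.random.default_rng(0)
samples = np.concatenate([rng.normal(0.0, 1.0, 500),
                          rng.normal(4.0, 0.5, 500)])  # bimodal test data

optN, optD, edges, C, Ns = sshist(samples)
print(optN, optD)                         # chosen bin count and bin width
counts, _ = np.histogram(samples, edges)  # histogram at the optimal binning
```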

sskernel(x, tin=None, W=None, nbs=1000)

Generates a kernel density estimate with globally-optimized bandwidth.

The optimal bandwidth is obtained as a minimizer of the formula, sum_{i,j} \int k(x - x_i) k(x - x_j) dx - 2 sum_{i~=j} k(x_i - x_j), where k(x) is the kernel function.

Parameters

x : array_like
    The one-dimensional samples drawn from the underlying density.
tin : array_like, optional
    The values where the density estimate is to be evaluated in generating the output 'y'.
W : array_like, optional
    The kernel bandwidths to use in optimization. Should not be chosen smaller than the sampling resolution of 'x'.
nbs : int, optional
    The number of bootstrap samples to use in estimating the [0.05, 0.95] confidence interval of the output 'y'.

Returns

y : array_like
    The estimated density, evaluated at points t / tin.
t : array_like
    The points where the density estimate 'y' is evaluated.
optw : double
    The optimal global kernel bandwidth.
W : array_like
    The kernel bandwidths evaluated during optimization.
C : array_like
    The cost functions associated with the bandwidths 'W'.
confb95 : array_like
    The 5% and 95% confidence interval of the kernel density estimate 'y'. Has dimensions 2 x len(y). confb95[0,:] corresponds to the 5% interval, and confb95[1,:] corresponds to the 95% interval.
yb : array_like
    The bootstrap samples used in estimating confb95. Each row corresponds to one bootstrap sample.

See Also

sshist, ssvkernel

References

.. [1] H. Shimazaki and S. Shinomoto, "Kernel Bandwidth Optimization in Spike Rate Estimation," in Journal of Computational Neuroscience 29(1-2): 171–182, 2010 http://dx.doi.org/10.1007/s10827-009-0180-4

Source code in indl/misc/kernels.py
def sskernel(x: np.ndarray, tin: Optional[np.ndarray] = None, W: Optional[np.ndarray] = None, nbs: int = 1000
             ) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Generates a kernel density estimate with globally-optimized bandwidth.

    The optimal bandwidth is obtained as a minimizer of the formula, sum_{i,j}
    \int k(x - x_i) k(x - x_j) dx  -  2 sum_{i~=j} k(x_i - x_j), where k(x) is
    the kernel function.


    Parameters
    ----------
    x : array_like
        The one-dimensional samples drawn from the underlying density
    tin : array_like, optional
        The values where the density estimate is to be evaluated in generating
        the output 'y'.
    W : array_like, optional
        The kernel bandwidths to use in optimization. Should not be chosen
        smaller than the sampling resolution of 'x'.
    nbs : int, optional
        The number of bootstrap samples to use in estimating the [0.05, 0.95]
        confidence interval of the output 'y'

    Returns
    -------
    y : array_like
        The estimated density, evaluated at points t / tin.
    t : array_like
        The points where the density estimate 'y' is evaluated.
    optw : double
        The optimal global kernel bandwidth.
    W : array_like
        The kernel bandwidths evaluated during optimization.
    C : array_like
        The cost functions associated with the bandwidths 'W'.
    confb95 : array_like
        The 5% and 95% confidence interval of the kernel density estimate 'y'.
        Has dimensions 2 x len(y). confb95[0,:] corresponds to the 5% interval,
        and confb95[1,:] corresponds to the 95% interval.
    yb : array_like
        The bootstrap samples used in estimating confb95. Each row corresponds
        to one bootstrap sample.

    See Also
    --------
    sshist, ssvkernel

    References
    ----------
    .. [1] H. Shimazaki and S. Shinomoto, "Kernel Bandwidth Optimization in
           Spike Rate Estimation," in Journal of Computational Neuroscience
           29(1-2): 171–182, 2010 http://dx.doi.org/10.1007/s10827-009-0180-4
    """

    def CostFunction(y_hist, N, w, dt):

        # build normal smoothing kernel
        yh = fftkernel(y_hist, w / dt)

        # formula for density
        C = np.sum(yh ** 2) * dt - 2 * np.sum(yh * y_hist) * dt \
            + 2 / (2 * np.pi) ** 0.5 / w / N
        C = C * N ** 2

        return C, yh

    T = np.max(x) - np.min(x)           # Total span of input spike times
    dx = np.sort(np.diff(np.sort(x)))   # Sorted ISIs
    dt_samp = dx[np.nonzero(dx)][0]     # Smallest nonzero ISI

    # set argument 't' if not provided
    if tin is None:
        tin = np.linspace(np.min(x), np.max(x), int(min(np.ceil(T / dt_samp), 1e3)))
        t = tin
    else:
        if dt_samp > min(np.diff(tin)):
            t = np.linspace(min(tin), max(tin), int(min(np.ceil(T / dt_samp), 1e3)))
        else:
            t = tin

    x_ab = x[(x >= min(tin)) & (x <= max(tin))]  # Spike times that fall within tin

    # calculate delta t
    dt = min(np.diff(t))

    # create the finest histogram
    thist_edges = np.r_[t - dt / 2, t[-1] + dt/2]
    y_hist, _ = np.histogram(x_ab, thist_edges)
    N = float(np.sum(y_hist))
    y_hist = y_hist / (N * dt)  # density

    # global search if input 'W' is defined
    if W is not None:
        C = np.zeros(len(W))
        C_min = np.inf
        for k, w in enumerate(W):
            C[k], yh = CostFunction(y_hist, N, w, dt)
            if C[k] < C_min:
                C_min = C[k]
                optw = w
                y = yh
    else:  # optimized search using golden section
        k = 0
        C = np.zeros((20, 1))
        W = np.zeros((20, 1))
        Wmin = 2*dt
        Wmax = np.max(x) - np.min(x)
        tol = 1e-4
        phi = (5**0.5 + 1) / 2
        a = ilogexp(Wmin)
        b = ilogexp(Wmax)
        c1 = (phi - 1) * a + (2 - phi) * b
        c2 = (2 - phi) * a + (phi - 1) * b
        f1, dummy = CostFunction(y_hist, N, logexp(c1), dt)
        f2, dummy = CostFunction(y_hist, N, logexp(c2), dt)
        while (np.abs(b-a) > tol * (np.abs(c1) + np.abs(c2))) & (k < 20):
            if f1 < f2:
                b = c2
                c2 = c1
                c1 = (phi - 1) * a + (2 - phi) * b
                f2 = f1
                f1, yh1 = CostFunction(y_hist, N, logexp(c1), dt)
                W[k] = logexp(c1)
                C[k] = f1
                optw = logexp(c1)
                y = yh1 / np.sum(yh1 * dt)
            else:
                a = c1
                c1 = c2
                c2 = (2 - phi) * a + (phi - 1) * b
                f1 = f2
                f2, yh2 = CostFunction(y_hist, N, logexp(c2), dt)
                W[k] = logexp(c2)
                C[k] = f2
                optw = logexp(c2)
                y = yh2 / np.sum(yh2 * dt)

            # increment iteration counter
            k = k + 1

        # discard unused entries in gs, C
        C = C[0:k]
        W = W[0:k]

    # estimate confidence intervals by bootstrapping
    yb = None
    confb95 = None
    if nbs > 0:
        nbs = int(nbs)
        yb = np.zeros((nbs, len(tin)))
        for i in range(nbs):
            # resample the event times with replacement
            idx = np.random.randint(0, len(x_ab), len(x_ab))
            xb = x_ab[idx]
            thist = np.concatenate((t, (t[-1] + dt)[np.newaxis]))
            y_histb = np.histogram(xb, thist - dt / 2)[0] / dt / N
            yb_buf = fftkernel(y_histb, optw / dt)
            yb_buf = yb_buf / np.sum(yb_buf * dt)
            yb[i, :] = np.interp(tin, t, yb_buf)
        ybsort = np.sort(yb, axis=0)
        y95b = ybsort[int(np.floor(0.05 * nbs)), :]
        y95u = ybsort[int(np.floor(0.95 * nbs)), :]
        confb95 = np.concatenate((y95b[np.newaxis], y95u[np.newaxis]), axis=0)

    # return outputs
    y = np.interp(tin, t, y)
    t = tin

    return y, t, optw, W, C, confb95, yb
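
An end-to-end sketch (synthetic event times; the seed and counts are arbitrary) of the fixed-bandwidth estimator:

```python
import numpy as np

rng = np.random.default_rng(0)
spikes = np.sort(rng.normal(0.5, 0.1, 200))  # synthetic event times

y, t, optw, W, C, confb95, yb = sskernel(spikes, nbs=100)
print(float(optw))     # globally optimal bandwidth
print(confb95.shape)   # (2, len(t)): 5% and 95% bootstrap bands
```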

ssvkernel(x, tin=None, M=80, nbs=100, WinFunc='Boxcar')

Generates a locally adaptive kernel-density estimate for one-dimensional data.

The user provides a one-dimensional vector of samples drawn from some underlying unknown distribution, and optionally the values where they want to estimate the probability density of that distribution. The algorithm solves an optimization problem to identify variable bandwidths across the domain where the data is provided.

The optimization is based on a principle of minimizing expected L2 loss function between the kernel estimate and an unknown underlying density function. An assumption is merely that samples are drawn from the density independently of each other.

The locally adaptive bandwidth is obtained by iteratively computing optimal fixed-size bandwidths within local intervals. The optimal bandwidths are selected such that the intervals in which they are optimized are gamma times larger than the bandwidths themselves. The parameter gamma is optimized by minimizing the L2 risk estimate.

Parameters

x : array_like
    The one-dimensional samples drawn from the underlying density.
tin : array_like, optional
    The values where the density estimate is to be evaluated in generating the output 'y'. Default value = None.
M : int, optional
    The number of window sizes to evaluate. Default value = 80.
nbs : int, optional
    The number of bootstrap samples to use in estimating the [0.05, 0.95] confidence interval of the output 'y'. Default value = 100.
WinFunc : string, optional
    The type of window function to use in estimating local bandwidth. Choose from one of 'Boxcar', 'Laplace', 'Cauchy' and 'Gauss'. Default value = 'Boxcar'.

Returns

y : array_like
    The estimated density, evaluated at points t / tin.
t : array_like
    The points where the density estimate 'y' is evaluated.
optw : array_like
    The optimal local kernel bandwidths at 't'.
gs : array_like
    The stiffness constants of the variable bandwidths evaluated.
C : array_like
    Cost functions associated with stiffness constraints.
confb95 : array_like
    The 5% and 95% confidence interval of the kernel density estimate 'y'. Has dimensions 2 x len(y). confb95[0,:] corresponds to the 5% interval, and confb95[1,:] corresponds to the 95% interval.
yb : array_like
    The bootstrap samples used in estimating confb95. Each row corresponds to one bootstrap sample.

See Also

sshist, sskernel

References

.. [1] H. Shimazaki and S. Shinomoto, "Kernel Bandwidth Optimization in Spike Rate Estimation," in Journal of Computational Neuroscience 29(1-2): 171–182, 2010 http://dx.doi.org/10.1007/s10827-009-0180-4

Source code in indl/misc/kernels.py
def ssvkernel(x: np.ndarray, tin: Optional[np.ndarray] = None, M: int = 80, nbs: int = 100, WinFunc: str = 'Boxcar'
              ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Generates a locally adaptive kernel-density estimate for one-dimensional
    data.

    The user provides a one-dimensional vector of samples drawn from some
    underlying unknown distribution, and optionally the values where they want
    to estimate the probability density of that distribution. The algorithm
    solves an optimization problem to identify variable bandwidths across the
    domain where the data is provided.

    The optimization is based on a principle of minimizing expected L2 loss
    function between the kernel estimate and an unknown underlying density
    function. An assumption is merely that samples are drawn from the density
    independently of each other.

    The locally adaptive bandwidth is obtained by iteratively computing optimal
    fixed-size bandwidths within local intervals. The optimal bandwidths are
    selected such that the intervals in which they are optimized are gamma
    times larger than the bandwidths themselves. The parameter gamma is
    optimized by minimizing the L2 risk estimate.

    Parameters
    ----------
    x : array_like
        The one-dimensional samples drawn from the underlying density
    tin : array_like, optional
        The values where the density estimate is to be evaluated in generating
        the output 'y'. Default value = None.
    M : int, optional
        The number of window sizes to evaluate. Default value = 80.
    nbs : int, optional
        The number of bootstrap samples to use in estimating the [0.05, 0.95]
        confidence interval of the output 'y'.
    WinFunc : string, optional
        The type of window function to use in estimating local bandwidth.
        Choose from one of 'Boxcar', 'Laplace', 'Cauchy' and 'Gauss'. Default
        value = 'Boxcar'.

    Returns
    -------
    y : array_like
        The estimated density, evaluated at points t / tin.
    t : array_like
        The points where the density estimate 'y' is evaluated.
    optw : array_like
        The optimal local kernel bandwidths at 't'.
    gs : array_like
        The stiffness constants of the variable bandwidths evaluated.
    C : array_like
        Cost functions associated with stiffness constraints.
    confb95 : array_like
        The 5% and 95% confidence interval of the kernel density estimate 'y'.
        Has dimensions 2 x len(y). confb95[0,:] corresponds to the 5% interval,
        and confb95[1,:] corresponds to the 95% interval.
    yb : array_like
        The bootstrap samples used in estimating confb95. Each row corresponds
        to one bootstrap sample.

    See Also
    --------
    sshist, sskernel

    References
    ----------
    .. [1] H. Shimazaki and S. Shinomoto, "Kernel Bandwidth Optimization in
           Spike Rate Estimation," in Journal of Computational Neuroscience
           29(1-2): 171–182, 2010 http://dx.doi.org/10.1007/s10827-009-0180-4
    """

    def CostFunction(y_hist, N, t, dt, optws, WIN, WinFunc, g):

        L = y_hist.size
        optwv = np.zeros((L,))
        for k in range(L):
            gs = optws[:, k] / WIN
            if g > np.max(gs):
                optwv[k] = np.min(WIN)
            else:
                if g < min(gs):
                    optwv[k] = np.max(WIN)
                else:
                    idx = np.max(np.nonzero(gs >= g))
                    optwv[k] = g * WIN[idx]

        # Nadaraya-Watson kernel regression
        optwp = np.zeros((L,))
        for k in range(L):
            if WinFunc == 'Boxcar':
                Z = Boxcar(t[k] - t, optwv / g)
            elif WinFunc == 'Laplace':
                Z = Laplace(t[k] - t, optwv / g)
            elif WinFunc == 'Cauchy':
                Z = Cauchy(t[k] - t, optwv / g)
            else:  # WinFunc == 'Gauss'
                Z = Gauss(t[k] - t, optwv / g)
            optwp[k] = np.sum(optwv * Z) / np.sum(Z)

        # speed-optimized balloon estimator
        idx = y_hist.nonzero()
        y_hist_nz = y_hist[idx]
        t_nz = t[idx]
        yv = np.zeros((L,))
        for k in range(L):
            yv[k] = np.sum(y_hist_nz * dt * Gauss(t[k] - t_nz, optwp[k]))
        yv = yv * N / np.sum(yv * dt)

        # cost function of estimated kernel
        cg = yv ** 2 - 2 * yv * y_hist + 2 / (2 * np.pi) ** 0.5 / optwp * y_hist
        Cg = np.sum(cg * dt)

        return Cg, yv, optwp

    # set argument 't' if not provided
    if tin is None:
        T = np.max(x) - np.min(x)
        dx = np.sort(np.diff(np.sort(x)))
        dt_samp = dx[np.nonzero(dx)][0]
        tin = np.linspace(np.min(x), np.max(x),
                          int(min(np.ceil(T / dt_samp), 1e3)))
        t = tin
        x_ab = x[(x >= min(tin)) & (x <= max(tin))]
    else:
        T = np.max(x) - np.min(x)
        x_ab = x[(x >= min(tin)) & (x <= max(tin))]
        dx = np.sort(np.diff(np.sort(x)))
        dt_samp = dx[np.nonzero(dx)][0]
        if dt_samp > min(np.diff(tin)):
            t = np.linspace(min(tin), max(tin),
                            int(min(np.ceil(T / dt_samp), 1e3)))
        else:
            t = tin

    # calculate delta t
    dt = min(np.diff(t))

    # create the finest histogram
    thist = np.concatenate((t, (t[-1]+dt)[np.newaxis]))
    y_hist = np.histogram(x_ab, thist-dt/2)[0] / dt
    L = y_hist.size
    N = float(np.sum(y_hist * dt))

    # initialize window sizes
    W = logexp(np.linspace(ilogexp(5 * dt), ilogexp(T), M))

    # compute local cost functions
    c = np.zeros((M, L))
    for j in range(M):
        w = W[j]
        yh = fftkernel(y_hist, w / dt)
        c[j, :] = yh**2 - 2 * yh * y_hist + 2 / (2 * np.pi)**0.5 / w * y_hist

    # initialize optimal ws
    optws = np.zeros((M, L))
    for i in range(M):
        Win = W[i]
        C_local = np.zeros((M, L))
        for j in range(M):
            C_local[j, :] = fftkernelWin(c[j, :], Win / dt, WinFunc)
        n = np.argmin(C_local, axis=0)
        optws[i, :] = W[n]

    # golden section search for stiffness parameter of variable bandwidths
    k = 0
    gs = np.zeros((30, 1))
    C = np.zeros((30, 1))
    tol = 1e-5
    a = 1e-12
    b = 1
    phi = (5**0.5 + 1) / 2
    c1 = (phi - 1) * a + (2 - phi) * b
    c2 = (2 - phi) * a + (phi - 1) * b
    f1 = CostFunction(y_hist, N, t, dt, optws, W, WinFunc, c1)[0]
    f2 = CostFunction(y_hist, N, t, dt, optws, W, WinFunc, c2)[0]
    while (np.abs(b-a) > tol * (abs(c1) + abs(c2))) & (k < 30):
        if f1 < f2:
            b = c2
            c2 = c1
            c1 = (phi - 1) * a + (2 - phi) * b
            f2 = f1
            f1, yv1, optwp1 = CostFunction(y_hist, N, t, dt, optws, W,
                                           WinFunc, c1)
            yopt = yv1 / np.sum(yv1 * dt)
            optw = optwp1
        else:
            a = c1
            c1 = c2
            c2 = (2 - phi) * a + (phi - 1) * b
            f1 = f2
            f2, yv2, optwp2 = CostFunction(y_hist, N, t, dt, optws, W,
                                           WinFunc, c2)
            yopt = yv2 / np.sum(yv2 * dt)
            optw = optwp2

        # capture estimates and increment iteration counter
        gs[k] = c1
        C[k] = f1
        k = k + 1

    # discard unused entries in gs, C
    gs = gs[0:k]
    C = C[0:k]

    # estimate confidence intervals by bootstrapping
    nbs = int(nbs)
    yb = np.zeros((nbs, tin.size))
    for i in range(nbs):
        Nb = np.random.poisson(lam=N)
        idx = np.random.randint(0, len(x_ab), Nb)
        xb = x_ab[idx]
        thist = np.concatenate((t, (t[-1] + dt)[np.newaxis]))
        y_histb = np.histogram(xb, thist - dt / 2)[0]
        idx = y_histb.nonzero()
        y_histb_nz = y_histb[idx]
        t_nz = t[idx]
        yb_buf = np.zeros((L, ))
        for k in range(L):
            yb_buf[k] = np.sum(y_histb_nz * Gauss(t[k] - t_nz, optw[k])) / Nb
        yb_buf = yb_buf / np.sum(yb_buf * dt)
        yb[i, :] = np.interp(tin, t, yb_buf)
    ybsort = np.sort(yb, axis=0)
    y95b = ybsort[int(np.floor(0.05 * nbs)), :]
    y95u = ybsort[int(np.floor(0.95 * nbs)), :]
    confb95 = np.concatenate((y95b[np.newaxis], y95u[np.newaxis]), axis=0)

    # return outputs
    y = np.interp(tin, t, yopt)
    optw = np.interp(tin, t, optw)
    t = tin

    return y, t, optw, gs, C, confb95, yb
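
And the corresponding sketch for the locally adaptive estimator (a density with one sharp and one broad mode, where a variable bandwidth helps):

```python
import numpy as np

rng = np.random.default_rng(0)
spikes = np.sort(np.concatenate([rng.normal(0.3, 0.01, 300),
                                 rng.normal(0.7, 0.10, 300)]))

y, t, optw, gs, C, confb95, yb = ssvkernel(spikes)
print(optw.min(), optw.max())  # bandwidth adapts: smaller near the sharp mode
```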

sigfuncs

minimum_jerk(x, a0=0, af=1, degree=1, duration=None)

A 1-D trajectory that minimizes jerk (the 3rd time-derivative of position). A minimum-jerk trajectory is considered "smooth" and is a feature of natural limb movements. https://storage.googleapis.com/wzukusers/user-31382847/documents/5a7253343814f4Iv6Hnt/minimumjerk.pdf

A minimum-jerk trajectory is defined by:

trajectory = a0 + (af - a0) * (10*dx**3 - 15*dx**4 + 6*dx**5)

where a0 is the resting position, and af is the finishing position.

duration is assumed to be the extent of x, but it may be overridden.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | ndarray | Time vector for the trajectory. | required |
| a0 | | Resting (start) position. | 0 |
| af | | Finishing position. | 1 |
| degree | | Derivative order of the returned profile: 1 for velocity, 2 for acceleration; any other value returns position. | 1 |
| duration | float | Total duration of x in seconds. If None (default), duration is calculated from x. | None |

Returns:

| Type | Description |
| --- | --- |
| ndarray | a0 + (af - a0) * (10*dx**3 - 15*dx**4 + 6*dx**5) |

Source code in indl/misc/sigfuncs.py
def minimum_jerk(x: np.ndarray, a0=0, af=1, degree=1, duration=None) -> np.ndarray:
    r"""
    A 1-D trajectory that minimizes jerk (3rd time-derivative of position).
    A minimum jerk trajectory is considered "smooth" and to be a feature of natural limb movements.
    https://storage.googleapis.com/wzukusers/user-31382847/documents/5a7253343814f4Iv6Hnt/minimumjerk.pdf

    A minimum-jerk trajectory is defined by:
    ```math
    trajectory = a0 + (af - a0) * (10(dx^3) - 15(dx^4) + 6(dx^5))
    ```
    where a0 is the resting position, and af is the finishing position.

    duration is assumed to be the extent of x but it may be overidden.
    Args:
        x:
        a0:
        af:
        degree:
        duration (float): Total duration of x in seconds. If None (default), calculated duration from x.

    Returns:
        a0 + (af - a0) * (10*(dx.^3) - 15*(dx.^4) + 6*(dx.^5))
    """
    x = np.array(x)[:, None]
    assert(x.size == x.shape[0]), f"x must be 1D trajectory, found {x.shape}"

    a0 = np.atleast_2d(a0)
    af = np.atleast_2d(af)

    if duration is None:
        sorted_x = np.sort(x, axis=0)
        dx = np.diff(sorted_x, axis=0)  # sample intervals of the sorted time vector
        duration = sorted_x[-1] - sorted_x[0] + np.min(dx)

    if degree not in [1, 2]:
        # Default - no derivative
        k0 = a0
        x1 = 10 * dx**3
        x2 = -15 * dx**4
        x3 = 6 * dx**5
    elif degree == 1:
        # Velocity
        k0 = np.zeros_like(a0)
        x1 = 30 * dx**2
        x2 = -60 * dx**3
        x3 = 30 * dx**4
    elif degree == 2:
        # Acceleration
        k0 = np.zeros_like(a0)
        x1 = 60 * dx
        x2 = -180 * dx**2
        x3 = 120 * dx**3

    return k0 + (af - a0) * (x1 + x2 + x3)
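
A short sketch (time grid arbitrary) evaluating the position and velocity profiles; note that the default degree=1 returns velocity, so position is requested explicitly with a degree outside [1, 2]:

```python
import numpy as np

t = np.linspace(0.0, 1.0, 101)               # a 1-second movement
pos = minimum_jerk(t, a0=0, af=1, degree=0)  # degree not in [1, 2] -> position
vel = minimum_jerk(t, degree=1)              # velocity profile (the default)

print(pos[0, 0], pos[-1, 0])  # ~0.0 -> ~1.0: starts at a0, ends at af
print(vel[0, 0], vel[-1, 0])  # ~0 at both ends: bell-shaped velocity
```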

sigmoid(x, A=0, K=1, C=1, Q=1, B=1, v=1, x_offset=0)

Returns f(x) = A + (K - A) / ((C + Q * np.exp(-B*(x - x_offset)))**(1/v)). This is a generalized logistic function. The default arguments reduce this to a simple sigmoid: f(x) = 1 / (1 + np.exp(-x)).

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| x | np.ndarray | Input values. | required |
| A | float | Lower asymptote. | 0 |
| K | float | Upper asymptote. | 1 |
| C | float | Typically 1. | 1 |
| Q | float | Controls the value at x = x_offset. | 1 |
| B | float | Growth rate. | 1 |
| v | float | Asymmetry exponent. | 1 |
| x_offset | | Horizontal shift. | 0 |

Returns:

| Type | Description |
| --- | --- |
| np.ndarray | A + (K - A) / ((C + Q * np.exp(-B*(x - x_offset)))**(1/v)) |

Source code in indl/misc/sigfuncs.py
def sigmoid(x, A=0, K=1, C=1, Q=1, B=1, v=1, x_offset=0):
    """
    Returns f(x) = A + (K - A) / ((C + Q * np.exp(-B*(x - x_offset)))**(1/v))
    This is a generalized logistic function.
    The default arguments reduce this to a simple sigmoid:
        f(x) = 1 / (1 + np.exp(-x))
    Args:
        x (np.ndarray): Input values.
        A (float): Lower asymptote.
        K (float): Upper asymptote.
        C (float): Typically 1.
        Q (float): Controls the value at x = x_offset.
        B (float): Growth rate.
        v (float): Asymmetry exponent.
        x_offset: Horizontal shift.

    Returns:
        np.ndarray: A + (K - A) / ((C + Q * np.exp(-B*(x - x_offset)))**(1/v))
    """
    y = A + (K - A) / ((C + Q * np.exp(-B*(x - x_offset)))**(1/v))
    return y
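
Finally, a one-line check (grid arbitrary) that the default arguments reduce sigmoid to the standard logistic function:

```python
import numpy as np

x = np.linspace(-6.0, 6.0, 101)
print(np.allclose(sigmoid(x), 1.0 / (1.0 + np.exp(-x))))  # True
```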