Python diff() examples from open-source projects
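
Before the project excerpts, a minimal self-contained sketch of what numpy.diff computes: out[i] = a[i+1] - a[i] along the chosen axis, so each order of differencing shortens the result by one element.

import numpy as np

a = np.array([1, 2, 4, 7, 0])
print(np.diff(a))        # [ 1  2  3 -7]  first-order differences
print(np.diff(a, n=2))   # [ 1  1 -10]   differencing applied twice
print(np.diff(np.array([[1, 3, 6], [0, 5, 6]]), axis=0))  # [[-1  2  0]]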

process_hdf.py — project: pyrsss, author: butala
# Imports assumed by this excerpt of the pyrsss source:
import logging
from datetime import timedelta

import numpy as NP
import pandas as PD

logger = logging.getLogger(__name__)


def fill_nans(df, delta=None):
    """
    Reindex *df* onto a regular time grid with spacing *delta* (a
    :class:`datetime.timedelta`; if None, use the smallest gap found
    in the existing index), inserting NaN rows at missing times.
    Return the tuple (reindexed DataFrame, delta).
    """
    if not delta:
        dt_diff = NP.diff(df.index.values)
        delta_timedelta64 = min(dt_diff)
        delta_seconds = delta_timedelta64 / NP.timedelta64(1, 's')
        delta = timedelta(seconds=delta_seconds)
    logger.info('Using delta = {} (s)'.format(delta.total_seconds()))
    index_new = PD.date_range(start=df.index[0],
                              end=df.index[-1],
                              freq=delta)
    missing = sorted(set(index_new) - set(df.index))
    if missing:
        logger.warning('Missing time indices (filled by NaNs):')
        for x in missing:
            logger.warning(x)
    return df.reindex(index_new, copy=False), delta
utils.py — project: cellranger, author: 10XGenomics
def numpy_groupby(values, keys):
    """ Group a collection of numpy arrays by key arrays.
        Yields (key_tuple, view_tuple) where key_tuple is the key grouped on and view_tuple is a tuple of views into the value arrays.
          values: tuple of arrays to group
          keys: tuple of sorted, numeric arrays to group by """

    if len(values) == 0:
        return
    if len(values[0]) == 0:
        return

    for key_array in keys:
        assert len(key_array) == len(keys[0])
    for value_array in values:
        assert len(value_array) == len(keys[0])

    # The indices where any of the keys differ from the previous key become group boundaries
    key_change_indices = np.logical_or.reduce(tuple(np.concatenate(([1], np.diff(key))) != 0 for key in keys))
    group_starts = np.flatnonzero(key_change_indices)
    group_ends = np.roll(group_starts, -1)
    group_ends[-1] = len(keys[0])

    for group_start, group_end in zip(group_starts, group_ends):  # itertools.izip in the Python 2 original
        yield tuple(key[group_start] for key in keys), tuple(value[group_start:group_end] for value in values)
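
A quick check of the grouping logic above, on a hypothetical toy input (assumes numpy_groupby is in scope):

import numpy as np

keys = (np.array([0, 0, 1, 1, 1]), np.array([5, 5, 5, 6, 6]))
values = (np.array([10, 20, 30, 40, 50]),)
for key_tuple, (view,) in numpy_groupby(values, keys):
    print(tuple(int(k) for k in key_tuple), view)
# (0, 5) [10 20]
# (1, 5) [30]
# (1, 6) [40 50]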
convert_data.py — project: aapm_thoracic_challenge, author: xf4j
def get_labels(contours, shape, slices):
    z = [np.around(s.ImagePositionPatient[2], 1) for s in slices]
    pos_r = slices[0].ImagePositionPatient[1]
    spacing_r = slices[0].PixelSpacing[1]
    pos_c = slices[0].ImagePositionPatient[0]
    spacing_c = slices[0].PixelSpacing[0]

    label_map = np.zeros(shape, dtype=np.float32)
    for con in contours:
        num = ROI_ORDER.index(con['name']) + 1
        for c in con['contours']:
            nodes = np.array(c).reshape((-1, 3))
            assert np.amax(np.abs(np.diff(nodes[:, 2]))) == 0
            z_index = z.index(np.around(nodes[0, 2], 1))
            r = (nodes[:, 1] - pos_r) / spacing_r
            c = (nodes[:, 0] - pos_c) / spacing_c
            rr, cc = polygon(r, c)
            label_map[z_index, rr, cc] = num

    return label_map
spikedetection.py — project: NeoAnalysis, author: neoanalysis
def __detect_spike_peak(self,ang_data,Thr,peak_before,peak_after):
        if Thr < 0:
            dd_0 = np.where(ang_data<Thr)[0]
        elif Thr >=0:
            dd_0 = np.where(ang_data>=Thr)[0]
        # Split the threshold-crossing indices into contiguous runs:
        # a jump > 1 in the index sequence starts a new excursion.
        dd_1 = np.diff(dd_0,n=1)
        dd_2 = np.where(dd_1 > 1)[0]+1
        dd_3 = np.split(dd_0,dd_2)
        spike_peak = []
        if Thr < 0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmin()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        elif Thr >=0:
            for ite in dd_3:
                if ite.size:
                    potent_peak = ite[ang_data[ite].argmax()]
                    if (potent_peak + peak_after <= ang_data.shape[0]) and (potent_peak - peak_before >= 0):
                        spike_peak.append(potent_peak)
        return np.array(spike_peak)
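
The method's core np.diff idiom, shown standalone on synthetic data (negative-threshold branch): indices below threshold are split into contiguous runs wherever the index sequence jumps by more than one, and each run contributes its most extreme sample.

import numpy as np

signal = np.array([0.0, -0.2, -1.5, -2.0, -0.1, 0.3, -1.8, -2.5, -1.9, 0.2])
thr = -1.0
idx = np.where(signal < thr)[0]                       # [2 3 6 7 8]
runs = np.split(idx, np.where(np.diff(idx) > 1)[0] + 1)
peaks = [run[signal[run].argmin()] for run in runs if run.size]
print(np.array(peaks))                                # [3 7]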
spikesorting.py — project: NeoAnalysis, author: neoanalysis
def __test_ks(self,x):
        x = x[~np.isnan(x)]
        n = x.size
        x.sort()
        yCDF = np.arange(1,n+1)/float(n)
        notdup = np.hstack([np.diff(x,1),[1]])
        notdup = notdup>0
        x_expcdf = x[notdup]
        y_expcdf = np.hstack([[0],yCDF[notdup]])
        zScores = (x_expcdf-np.mean(x))/np.std(x,ddof=1)
        mu = 0
        sigma = 1
        # erfc is scipy.special.erfc in the original module
        theocdf = 0.5*erfc(-(zScores-mu)/(np.sqrt(2)*sigma))

        delta1 = y_expcdf[:-1]-theocdf
        delta2 = y_expcdf[1:]-theocdf
        deltacdf = np.abs(np.hstack([delta1,delta2]))
        KSmax = deltacdf.max()
        return KSmax
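
A detail worth noting above: on the sorted array, np.diff doubles as a duplicate filter, since a zero difference marks a repeated value, so notdup keeps only the last copy of each. In isolation:

import numpy as np

x = np.array([1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0])
notdup = np.hstack([np.diff(x), [1]]) > 0
print(x[notdup])  # [1. 2. 3. 4.]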
helpers.py — project: inqbus.rainflow, author: Inqbus
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # first and last value is always a local extrema
    signchange[0] = 1

    # last value is missing because the diff-array is 1 value shorter than the
    # input array so we have to add it again
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
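
A toy run (assumes get_extrema above is in scope): the endpoints are always kept, plus every point where the sign of the difference changes.

import numpy as np

data = np.array([0, 1, 3, 2, 0, 4])
print(get_extrema(data))  # [0 3 0 4] -- endpoints plus the peak and the trough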
e2hdf.py — project: pyrsss, author: butala
def apply_emtf(df_E,
               df_B,
               emtf_key,
               index,
               extrapolate0=True):
    """
    Apply the EMTF associated with *emtf_key* to magnetometer data
    found in *df_B* and store result to *df_E*. Use USArray .xml
    repository information :class:`Index` to process the 3-D EMTFs.
    """
    logger.info('applying transfer function {}'.format(emtf_key))
    interval = NP.diff(df_B.index.values[:2])[0] / NP.timedelta64(1, 's')
    Bx = df_B.B_X.values
    By = df_B.B_Y.values
    if emtf_key.startswith('USArray'):
        xml_fname = index[emtf_key][1]
        Ex, Ey = tf_3D(Bx, By, interval, xml_fname, extrapolate0=extrapolate0)
    else:
        Ex, Ey = tf_1D(Bx, By, interval, emtf_key)
    df_E[emtf_key + '_X'] = Ex
    df_E[emtf_key + '_Y'] = Ey
    return df_E
tfinterp.py — project: onsager_deep_learning, author: mborgerding
def interp1d_(xin_,xp,yp_):
    """
    Interpolate a uniformly sampled piecewise linear function. Mapping elements
    from xin_ to the result.  Input values will be clipped to range of xp.
        xin_ :  input tensor (real)
        xp : x grid (constant -- must be a 1d numpy array, uniformly spaced)
        yp_ : tensor of the result values at the gridpoints xp
    """
    import tensorflow as tf
    x_ = tf.clip_by_value(xin_,xp.min(),xp.max())
    dx = xp[1]-xp[0]
    assert len(xp.shape)==1,'only 1d interpolation'
    assert xp.shape[0]==int(yp_.get_shape()[0])
    assert abs(np.diff(xp)/dx - 1.0).max() < 1e-6,'must be uniformly sampled'

    x1_ = tf.expand_dims(x_,-1)
    dt = yp_.dtype
    wt_ = tf.maximum(tf.constant(0.,dtype=dt), 1-abs(x1_ - tf.constant(xp,dtype=dt))/dx  )
    y_ = tf.reduce_sum(wt_ * yp_,axis=-1)
    return y_
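
A usage sketch, assuming TensorFlow 2 with eager execution: interpolating y = x**2 on a uniform grid.

import numpy as np
import tensorflow as tf

xp = np.linspace(0.0, 1.0, 5, dtype=np.float32)   # uniform grid, as required
yp_ = tf.constant(xp ** 2, dtype=tf.float32)      # values at the gridpoints
xin_ = tf.constant([0.1, 0.5, 0.9], dtype=tf.float32)
print(interp1d_(xin_, xp, yp_).numpy())           # ~[0.025 0.25  0.825]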
DCIP_overburden_PseudoSection.py — project: em_examples, author: geoscixyz
def PseudoSectionWidget(survey, flag):
    if flag == "PoleDipole":
        ntx, nmax = xr.size-2, 8
        dxr = np.diff(xr)
    elif flag == "DipolePole":
        ntx, nmax = xr.size-1, 7
        dxr = xr
    elif flag == "DipoleDipole":
        ntx, nmax = xr.size-3, 8
        dxr = np.diff(xr)
    xzlocs = getPseudoLocs(dxr, ntx, nmax, flag)
    PseudoSectionPlot = lambda i,j,flag: PseudoSectionPlotfnc(i, j, survey, flag)
    return widgetify(PseudoSectionPlot,
                     i=IntSlider(min=0, max=ntx-1, step=1, value=0),
                     j=IntSlider(min=0, max=nmax-1, step=1, value=0),
                     flag=ToggleButtons(options=['DipoleDipole', 'PoleDipole', 'DipolePole'],
                                        description='Array Type'),)
Quant_Indicators.py — project: QTS_Research, author: geome-mitbbs
def rsi(obj, start=-14, end=-1, price_feature='Close'):
    if isinstance(obj, str):
        obj = prices(obj, start, end, price_feature)
        start = 0
        end = -1

    if end < 0:
        end += len(obj)
    if start < 0:
        start += len(obj)

    _data = np.diff(obj[start: (end + 1)])
    len_gain = len(_data[_data > 0.0])
    len_loss = len(_data[_data < 0.0])
    if len_gain == 0 or len_loss == 0:
        return 50
    average_gain = np.mean(_data[_data > 0.0])
    average_loss = np.abs(np.mean(_data[_data < 0.0]))
    first_rs = average_gain / average_loss
    rsi = 100 - 100 / (1 + first_rs)

    return rsi
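
Called with a plain list of hypothetical closing prices (skipping the string/prices() path):

import numpy as np

closes = [44.0, 44.3, 44.1, 44.2, 44.5, 43.9, 44.6, 45.0,
          44.8, 45.1, 45.4, 45.2, 45.6, 45.8, 46.0]
print(rsi(closes))  # a value in (0, 100); above 50 here since gains dominate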
pycubicspline.py — project: pycubicspline, author: AtsushiSakai
def __init__(self, x, y):
        self.b, self.c, self.d, self.w = [], [], [], []

        self.x = x
        self.y = y

        self.nx = len(x)  # dimension of x
        h = np.diff(x)

        # calc coefficient a
        self.a = [iy for iy in y]

        # calc coefficient c
        A = self.__calc_A(h)
        B = self.__calc_B(h)
        self.c = np.linalg.solve(A, B)
        #  print(self.c1)

        # calc spline coefficient b and d
        for i in range(self.nx - 1):
            self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
            tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
                (self.c[i + 1] + 2.0 * self.c[i]) / 3.0
            self.b.append(tb)
spg.py — project: muesr, author: bonfus
def unique_reflections(self, hkl):
        """Returns a subset *hkl* containing only the symmetry-unique
        reflections.

        Example:

        >>> from ase.lattice.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.unique_reflections([[ 2,  0,  0], 
        ...                        [ 0, -2,  0], 
        ...                        [ 2,  2,  0], 
        ...                        [ 0, -2, -2]])
        array([[2, 0, 0],
               [2, 2, 0]])
        """
        hkl = np.array(hkl, dtype=int, ndmin=2)
        hklnorm = self.symmetry_normalised_reflections(hkl)
        perm = np.lexsort(hklnorm.T)
        iperm = perm.argsort()
        xmask = np.abs(np.diff(hklnorm[perm], axis=0)).any(axis=1)
        mask = np.concatenate(([True], xmask))
        imask = mask[iperm]
        return hkl[imask]
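
The deduplication above is a standard lexsort/np.diff pattern: sort the rows, then a row is unique iff it differs from its predecessor. Stripped of the symmetry normalisation:

import numpy as np

rows = np.array([[2, 0, 0], [2, 2, 0], [2, 0, 0]])
perm = np.lexsort(rows.T)
keep = np.concatenate(([True], np.abs(np.diff(rows[perm], axis=0)).any(axis=1)))
print(rows[perm][keep])  # [[2 0 0]
                         #  [2 2 0]]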
piwall.py — project: piwall-cvtools, author: infinnovation
def contour_to_monitor_coords(screenCnt):
    '''Apply pyimagesearch algorithm to identify tl,tr,br,bl points from a contour'''
    # now that we have our screen contour, we need to determine
    # the top-left, top-right, bottom-right, and bottom-left
    # points so that we can later warp the image -- we'll start
    # by reshaping our contour to be our finals and initializing
    # our output rectangle in top-left, top-right, bottom-right,
    # and bottom-left order
    pts = screenCnt.reshape(4, 2)
    rect = np.zeros((4, 2), dtype = "float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    return rect
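
A toy run on a hypothetical near-rectangular contour (OpenCV-style shape (4, 1, 2); assumes the function above is in scope):

import numpy as np

pts = np.array([[10, 10], [100, 12], [98, 80], [8, 78]], dtype="float32")
rect = contour_to_monitor_coords(pts.reshape(4, 1, 2))
print(rect)  # rows are top-left, top-right, bottom-right, bottom-left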
TimeControl.py — project: astk, author: openalea-incubator
def __call__(self, time_sequence, weather_data):
        """ Compute thermal time accumulation over time_sequence

        :Parameters:
        ----------
        - `time_sequence` (panda dateTime index)
            A sequence of TimeStamps indicating the dates of all elementary time steps of the simulation
        - weather (alinea.astk.Weather instance)
            A Weather database

        """    
        try:
            Tair = weather_data.temperature_air[time_sequence]
        except Exception:
            # strange extract needed on visualea 1.0 (to test again with ipython in visualea)
            T_data = weather_data[['temperature_air']]
            Tair = numpy.array([float(T_data.loc[d]) for d in time_sequence])
        Tcut = numpy.maximum(numpy.zeros_like(Tair), Tair - self.Tbase)
        days = [0] + [((t - time_sequence[0]).total_seconds()+ 3600) / 3600 / 24 for t in time_sequence]
        dt = numpy.diff(days).tolist()
        return numpy.cumsum(Tcut * dt)

# functional call for nodes
functions.py — project: seniority_list, author: rubydatasystems
def monotonic(sequence):
    '''test for non-decreasing array-like input
    (note: np.diff(sequence) >= 0 permits equal adjacent values,
    so this tests monotone non-decreasing, not strictly increasing).
    May be used to determine when the need for the no bump,
    no flush routine is no longer required.
    If the test is true, and there are no job changes,
    special rights, or furlough recalls,
    then a straight stovepipe job assignment routine may
    be implemented (fast).
    input
        sequence
            array-like input (list or numpy array ok)
    '''
    seq_diff = np.diff(sequence)
    return np.all(seq_diff >= 0)
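
Two toy checks (note the non-strict comparison):

import numpy as np

print(monotonic([1, 2, 2, 3]))  # True -- ties pass the >= 0 test
print(monotonic([3, 1, 2]))     # False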


# GET_MONTH_SLICE
megafacade.py — project: facade-segmentation, author: jfemiani
def _cut_windows_vertically(self, door_top, roof_top, sky_sig, win_strip):
        win_sig = np.percentile(win_strip, 85, axis=1)
        win_sig[sky_sig > 0.5] = 0
        if win_sig.max() > 0:
            win_sig /= win_sig.max()
        win_sig[:roof_top] = 0
        win_sig[door_top:] = 0
        runs, starts, values = run_length_encode(win_sig > 0.5)
        win_heights = runs[values]
        win_tops = starts[values]
        if len(win_heights) > 0:
            win_bottom = win_tops[-1] + win_heights[-1]
            win_top = win_tops[0]
            win_vertical_spacing = np.diff(win_tops).mean() if len(win_tops) > 1 else 0
        else:
            win_bottom = win_top = win_vertical_spacing = -1

        self.top = int(win_top)
        self.bottom = int(win_bottom)
        self.vertical_spacing = int(win_vertical_spacing)
        self.vertical_scores = make_list(win_sig)
        self.heights = np.array(win_heights)
        self.tops = np.array(win_tops)
plot_gradients.py — project: IDNNs, author: ravidziv
def calc_mean_var_loss(epochsInds,loss_train):
    #loss_train has dimensions (# epochs) x (# batches)
    num_of_epochs = loss_train.shape[0]
    #Average over the batches
    loss_train_mean = np.mean(loss_train,1)
    #The diff divided by the gap between the sampled epoch indices
    d_mean_loss_to_dt = np.sqrt(np.abs(np.diff(loss_train_mean) / np.diff(epochsInds[:])))
    var_loss = []
    #Go over the epochs
    for epoch_index in range(num_of_epochs):
        #The loss for the specific epoch
        current_loss = loss_train[epoch_index, :]
        #The derivative between the batches
        current_loss_dt = np.diff(current_loss)
        #The mean of its derivative
        average_loss = np.mean(current_loss_dt)
        current_loss_minus_mean = current_loss_dt - average_loss
        #The covariance between the batches
        cov_mat = np.dot(current_loss_minus_mean[:, None], current_loss_minus_mean[None, :])
        # The trace of the cov matrix
        trac_cov = np.trace(cov_mat)
        var_loss.append(trac_cov)
    return np.array(var_loss), d_mean_loss_to_dt
Inverse.py — project: PyMieScatt, author: bsumlin
def find_intersections(A,B):
  arrayMinimum = lambda x1, x2: np.where(x1<x2, x1, x2)
  arrayMaximum = lambda x1, x2: np.where(x1>x2, x1, x2)
  arrayAll = lambda abools: np.dstack(abools).all(axis=2)
  slope = lambda line: (lambda d: d[:,1]/d[:,0])(np.diff(line, axis=0))

  x11, x21 = np.meshgrid(A[:-1, 0], B[:-1, 0])
  x12, x22 = np.meshgrid(A[1:, 0], B[1:, 0])
  y11, y21 = np.meshgrid(A[:-1, 1], B[:-1, 1])
  y12, y22 = np.meshgrid(A[1:, 1], B[1:, 1])

  m1, m2 = np.meshgrid(slope(A), slope(B))
  # Masked arrays handle the rare case of a perfectly vertical line segment
  # (note that only -inf slopes are masked here; +inf would slip through)
  _m1 = np.ma.masked_array(m1,m1==-np.inf)
  _m2 = np.ma.masked_array(m2,m2==-np.inf)
  yi = (_m1*(x21-x11-y21/_m2)+y11)/(1-_m1/_m2)
  xi = (yi-y21)/_m2+x21

  xconds = (arrayMinimum(x11, x12) < xi, xi <= arrayMaximum(x11, x12),
            arrayMinimum(x21, x22) < xi, xi <= arrayMaximum(x21, x22) )
  yconds = (arrayMinimum(y11, y12) < yi, yi <= arrayMaximum(y11, y12),
            arrayMinimum(y21, y22) < yi, yi <= arrayMaximum(y21, y22) )

  return xi[arrayAll(xconds)], yi[arrayAll(yconds)]
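
A toy run, assuming the function above is in scope (both curves need nonzero segment slopes for the masked-array arithmetic to stay finite):

import numpy as np

A = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]])  # tent-shaped polyline
B = np.array([[0.0, 0.8], [2.0, -0.2]])             # slanted segment
xi, yi = find_intersections(A, B)
print(xi, yi)  # both ~0.533: the single crossing point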
iv.py — project: pystudio, author: satorchi
def Pbias(self,TES):
    '''
    find the Pbias at 90% Rn
    '''    
    filterinfo=self.filterinfo(TES)
    if filterinfo is None:
        return None

    Rn_ratio=self.Rn_ratio(TES)
    if not isinstance(Rn_ratio,np.ndarray):return None

    istart,iend=self.selected_iv_curve(TES)

    Rn_ratio=Rn_ratio[istart:iend]
    Ptes=self.Ptes(TES)
    Ptes=Ptes[istart:iend]

    # check that Rn_ratio is increasing
    increasing=np.diff(Rn_ratio).mean()
    if increasing<0:
        Pbias=np.interp(90., np.flip(Rn_ratio,0), np.flip(Ptes,0))
    else:
        Pbias=np.interp(90., Rn_ratio, Ptes)

    return Pbias
var.py — project: pyflux, author: RJT1990
def neg_loglik(self,beta):
        """ Creates the negative log-likelihood of the model

        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables

        Returns
        ----------
        The negative log-likelihood of the model
        """

        mu, Y = self._model(beta)

        if self.use_ols_covariance is False:
            cm = self.custom_covariance(beta)
        else:
            cm = self.ols_covariance()

        diff = Y.T - mu.T
        ll1 =  -(mu.T.shape[0]*mu.T.shape[1]/2.0)*np.log(2.0*np.pi) - (mu.T.shape[0]/2.0)*np.linalg.slogdet(cm)[1]
        inverse = np.linalg.pinv(cm)

        return var_likelihood(ll1, mu.T.shape[0], diff, inverse)
market_variables.py — project: AlphaPy, author: ScottFreeLLC
def pchange2(f, c1, c2):
    r"""Calculate the percentage change between two variables.

    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the two columns ``c1`` and ``c2``.
    c1 : str
        Name of the first column in the dataframe ``f``.
    c2 : str
        Name of the second column in the dataframe ``f``.

    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.

    """
    new_column = f[c1] / f[c2] - 1.0
    return new_column


#
# Function diff
#
market_variables.py — project: AlphaPy, author: ScottFreeLLC
def diff(f, c, n = 1):
    r"""Calculate the n-th order difference for the given variable.

    Parameters
    ----------
    f : pandas.DataFrame
        Dataframe containing the column ``c``.
    c : str
        Name of the column in the dataframe ``f``.
    n : int
        The number of times that the values are differenced.

    Returns
    -------
    new_column : pandas.Series (float)
        The array containing the new feature.

    """
    new_column = np.diff(f[c], n)
    return new_column
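
Worth noting: np.diff shortens the output, so the returned array has len(f) - n values and does not align with the dataframe index (pandas' f[c].diff(n) would keep alignment by inserting NaNs). A toy call with hypothetical data:

import numpy as np
import pandas as pd

f = pd.DataFrame({'close': [10.0, 10.5, 10.2, 11.0]})
print(diff(f, 'close'))  # [ 0.5 -0.3  0.8] -- one value shorter than the column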


#
# Function down
#
units.py — project: pytac, author: willrogers
def __init__(self, x, y, post_eng_to_phys=unit_function, pre_phys_to_eng=unit_function):
        """ PChip interpolation for converting between physics and engineering units.

        Args:
            x(list): A list of points on the x axis. These must be in increasing order
                for the interpolation to work. Otherwise, a ValueError is raised.
            y(list): A list of points on the y axis. These must be in increasing or
                decreasing order. Otherwise, a ValueError is raised.

        Raises:
            ValueError: raised when the given y coefficients are neither in
                increasing nor in decreasing order.
        """
        super(self.__class__, self).__init__(post_eng_to_phys, pre_phys_to_eng)
        self.x = x
        self.y = y
        self.pp = PchipInterpolator(x, y)

        diff = numpy.diff(y)
        if not ((numpy.all(diff > 0)) or (numpy.all((diff < 0)))):
            raise ValueError("Given coefficients must be monotonically"
                             "decreasing.")
common.py — project: PySAT, author: USGS-Astrogeology
def __init__(self, signal, smoothness_param, deriv_order=1):
        self.y = signal
        assert deriv_order > 0, 'deriv_order must be an int > 0'
        # Compute the fixed derivative of identity (D).
        d = np.zeros(deriv_order * 2 + 1, dtype=int)
        d[deriv_order] = 1
        d = np.diff(d, n=deriv_order)
        n = self.y.shape[0]
        k = len(d)
        s = float(smoothness_param)

        # Here be dragons: essentially we're faking a big banded matrix D,
        # doing s * D.T.dot(D) with it, then taking the upper triangular bands.
        diag_sums = np.vstack([
            np.pad(s * np.cumsum(d[-i:] * d[:i]), ((k - i, 0),), 'constant')
            for i in range(1, k + 1)])
        upper_bands = np.tile(diag_sums[:, -1:], n)
        upper_bands[:, :k] = diag_sums
        for i, ds in enumerate(diag_sums):
            upper_bands[i, -i - 1:] = ds[::-1][:i + 1]
        self.upper_bands = upper_bands
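
The d built above is the finite-difference stencil of the requested order, obtained by differencing a unit impulse n times; for deriv_order=2 it is the familiar second-derivative stencil:

import numpy as np

deriv_order = 2
d = np.zeros(deriv_order * 2 + 1, dtype=int)   # unit impulse: [0 0 1 0 0]
d[deriv_order] = 1
print(np.diff(d, n=deriv_order))               # [ 1 -2  1]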
BootstrapReserve.py — project: chainladder-python, author: jbogaardt
def __model_form(self, tri_array):
        w = np.nan_to_num(self.weights/tri_array[:,:,:-1]**(2-self.alpha))
        x = np.nan_to_num(tri_array[:,:,:-1]*(tri_array[:,:,1:]*0+1))
        y = np.nan_to_num(tri_array[:,:,1:])
        LDF = np.sum(w*x*y,axis=1)/np.sum(w*x*x,axis=1)
        #Chainladder (alpha=1/delta=1)
        #LDF = np.sum(np.nan_to_num(tri_array[:,:,1:]),axis=1) / np.sum(np.nan_to_num((tri_array[:,:,1:]*0+1)*tri_array[:,:,:-1]),axis=1)
        #print(LDF.shape)
        # assumes no tail
        CDF = np.append(np.cumprod(LDF[:,::-1],axis=1)[:,::-1],np.array([1]*tri_array.shape[0]).reshape(tri_array.shape[0],1),axis=1)    
        latest = np.flip(tri_array,axis=1).diagonal(axis1=1,axis2=2)   
        ults = latest*CDF
        lu = list(ults)
        lc = list(CDF)
        exp_cum_triangle = np.array([np.flipud(lu[num].reshape(tri_array.shape[2],1).dot(1/lc[num].reshape(1,tri_array.shape[2]))) for num in range(tri_array.shape[0])])
        exp_incr_triangle = np.append(exp_cum_triangle[:,:,0,np.newaxis],np.diff(exp_cum_triangle),axis=2)
        return LDF, CDF, ults, exp_incr_triangle
ex3-self_learning_quant.py — project: sl-quant, author: danielzak
def init_state(indata, test=False):
    close = indata['close'].values
    diff = np.diff(close)
    diff = np.insert(diff, 0, 0)
    sma15 = SMA(indata, timeperiod=15)
    sma60 = SMA(indata, timeperiod=60)
    rsi = RSI(indata, timeperiod=14)
    atr = ATR(indata, timeperiod=14)

    #--- Preprocess data
    xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))

    xdata = np.nan_to_num(xdata)
    if test == False:
        scaler = preprocessing.StandardScaler()
        xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
        joblib.dump(scaler, 'data/scaler.pkl')
    elif test == True:
        scaler = joblib.load('data/scaler.pkl')
        # use transform, not fit_transform, so the saved training scaler is actually applied
        xdata = np.expand_dims(scaler.transform(xdata), axis=1)
    state = xdata[0:1, 0:1, :]

    return state, xdata, close

#Take Action
ex1-self_learning_quant.py — project: sl-quant, author: danielzak
def init_state(data):

    close = data
    diff = np.diff(data)
    diff = np.insert(diff, 0, 0)

    #--- Preprocess data
    xdata = np.column_stack((close, diff))
    xdata = np.nan_to_num(xdata)
    scaler = preprocessing.StandardScaler()
    xdata = scaler.fit_transform(xdata)

    state = xdata[0:1, :]
    return state, xdata

#Take Action

