Example source code for Python's zeros_like()

screencp.py (project: OpenAI_Challenges, author: AlwaysLearningDeeper)
def roi(img,vertices):
    # blank mask:
    mask = np.zeros_like(img)

    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, 255)

    # returning the image only where mask pixels are nonzero
    masked = cv2.bitwise_and(img, mask)
    return masked
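
A minimal usage sketch for the `roi` helper above, assuming `numpy` is imported as `np` and OpenCV as `cv2` (as in the source file); the image and the triangular vertices are illustrative values only:

import numpy as np
import cv2

# Blank 100x100 single-channel image with a bright square in the middle.
img = np.zeros((100, 100), dtype=np.uint8)
img[25:75, 25:75] = 200

# One triangular polygon; cv2.fillPoly expects a list of int32 point arrays.
vertices = [np.array([[10, 90], [50, 10], [90, 90]], dtype=np.int32)]

masked = roi(img, vertices)   # pixels outside the triangle become 0
print(masked.shape, masked.max())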
kshape.py (project: rca-evaluation, author: sieve-microservices)
def roll_zeropad(a, shift, axis=None):
    a = np.asanyarray(a)
    if shift == 0: return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift,n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift,n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res
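
A quick comparison of `roll_zeropad` above with `np.roll`, assuming `numpy` is imported as `np`; the sample array is arbitrary:

import numpy as np

a = np.array([1, 2, 3, 4, 5])
print(np.roll(a, 2))        # [4 5 1 2 3]  -- elements wrap around
print(roll_zeropad(a, 2))   # [0 0 1 2 3]  -- vacated positions are zero-filled
print(roll_zeropad(a, -2))  # [3 4 5 0 0]
print(roll_zeropad(a, 7))   # [0 0 0 0 0]  -- shift larger than the array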
losses_test.py (project: seq2seq, author: google)
def test_op(self):
    logits = np.random.randn(self.sequence_length, self.batch_size,
                             self.vocab_size)
    logits = logits.astype(np.float32)
    sequence_length = np.array([1, 2, 3, 4])
    targets = np.random.randint(0, self.vocab_size,
                                [self.sequence_length, self.batch_size])
    losses = seq2seq_losses.cross_entropy_sequence_loss(logits, targets,
                                                        sequence_length)

    with self.test_session() as sess:
      losses_ = sess.run(losses)

    # Make sure all losses not past the sequence length are > 0
    np.testing.assert_array_less(np.zeros_like(losses_[:1, 0]), losses_[:1, 0])
    np.testing.assert_array_less(np.zeros_like(losses_[:2, 1]), losses_[:2, 1])
    np.testing.assert_array_less(np.zeros_like(losses_[:3, 2]), losses_[:3, 2])

    # Make sure all losses past the sequence length are 0
    np.testing.assert_array_equal(losses_[1:, 0], np.zeros_like(losses_[1:, 0]))
    np.testing.assert_array_equal(losses_[2:, 1], np.zeros_like(losses_[2:, 1]))
    np.testing.assert_array_equal(losses_[3:, 2], np.zeros_like(losses_[3:, 2]))
RankOrderedAutoencoder.py (project: rank-ordered-autoencoder, author: paulbertens)
def __init__(self, input_shape, output_shape):
        self.input_shape = input_shape
        self.input = np.zeros((output_shape[0], self.input_shape[0] * self.input_shape[1] *
                               self.input_shape[2]),dtype=np.float32)
        self.output = np.zeros(output_shape, dtype=np.float32)
        self.output_raw = np.zeros_like(self.output)
        self.output_error = np.zeros_like(self.output)
        self.output_average = np.zeros(self.output.shape[1], dtype=np.float32)
        self.weights = np.random.normal(0, np.sqrt(2.0 / (self.output.shape[1] + self.input.shape[1])),
                                        size=(self.input.shape[1], self.output.shape[1])).astype(np.float32)
        self.gradient = np.zeros_like(self.weights)
        self.reconstruction = np.zeros_like(self.weights)
        self.errors = np.zeros_like(self.weights)
        self.output_ranks = np.zeros(self.output.shape[1], dtype=np.int32)
        self.learning_rate = 1
        self.norm_limit = 0.1
train.py (project: deep-learning, author: ljanyst)
def gen_batches(data, n_seqs, n_steps):
    """Create a generator that returns batches of size n_seqs x n_steps."""

    characters_per_batch = n_seqs * n_steps
    n_batches = len(data) // characters_per_batch

    # Keep only enough characters to make full batches
    data = data[:n_batches*characters_per_batch]
    data = data.reshape([n_seqs, -1])

    for n in range(0, data.shape[1], n_steps):
        x = data[:, n:n+n_steps]
        y = np.zeros_like(x)
        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
        yield x, y

#-------------------------------------------------------------------------------
# Parse commandline
#-------------------------------------------------------------------------------
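
A hedged sketch of driving `gen_batches` above with integer-encoded text, assuming `numpy` is imported as `np`; the toy data and batch sizes are made up:

import numpy as np

data = np.arange(100)   # toy "encoded text": 100 integer character ids

for x, y in gen_batches(data, n_seqs=4, n_steps=5):
    # y is x shifted one step to the left, with x's first column wrapped
    # into y's last column.
    print(x.shape, y.shape)   # (4, 5) (4, 5)
    break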
filter.py (project: guided-filter, author: lisabug)
def filter(self, p):
        """

        Parameters
        ----------
        p: NDArray
            Filtering input which is 2D or 3D with format
            HW or HWC

        Returns
        -------
        ret: NDArray
            Filtering output whose shape is same with input
        """
        p = to_32F(p)
        if len(p.shape) == 2:
            return self._Filter.filter(p)
        elif len(p.shape) == 3:
            channels = p.shape[2]
            ret = np.zeros_like(p, dtype=np.float32)
            for c in range(channels):
                ret[:, :, c] = self._Filter.filter(p[:, :, c])
            return ret
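
The class internals (`self._Filter`) are not shown in this snippet, so the sketch below only re-creates the per-channel dispatch pattern used above with a generic 2D filter callable; the helper name `filter_per_channel` and the toy data are assumptions for illustration:

import numpy as np

def filter_per_channel(p, filter2d):
    """Apply a 2D filter to an HW or HWC array, channel by channel."""
    p = p.astype(np.float32)
    if p.ndim == 2:
        return filter2d(p)
    ret = np.zeros_like(p, dtype=np.float32)
    for c in range(p.shape[2]):
        ret[:, :, c] = filter2d(p[:, :, c])
    return ret

img = np.random.rand(8, 8, 3).astype(np.float32)
out = filter_per_channel(img, lambda ch: ch - ch.mean())   # trivial stand-in filter
print(out.shape)   # (8, 8, 3)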
sun3d_utils.py (project: pybot, author: spillai)
def _process_label(self, fn): 
        """
        TODO: Fix one-indexing to zero-index; 
        retained one-index due to uint8 constraint
        """
        mat = loadmat(fn, squeeze_me=True)
        _labels = mat['seglabel'].astype(np.uint8)
        # _labels -= 1 # (move to zero-index)

        labels = np.zeros_like(_labels)
        for (idx, name) in enumerate(mat['names']): 
            try: 
                value = SUNRGBDDataset.target_hash[name]
            except: 
                value = 0
            mask = _labels == idx+1
            labels[mask] = value
        return self._pad_image(labels)
recognition_utils.py (project: pybot, author: spillai)
def recall_from_IoU(IoU, samples=500): 
    """
    plot recall_vs_IoU_threshold
    """

    if not (isinstance(IoU, list) or IoU.ndim == 1):
        raise ValueError('IoU needs to be a list or 1-D')
    iou = np.float32(IoU)

    # Plot intersection over union
    IoU_thresholds = np.linspace(0.0, 1.0, samples)
    recall = np.zeros_like(IoU_thresholds)
    for idx, IoU_th in enumerate(IoU_thresholds):
        tp, relevant = 0, 0
        inds, = np.where(iou >= IoU_th)
        recall[idx] = len(inds) * 1.0 / len(IoU)

    return recall, IoU_thresholds 
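
A small usage sketch for `recall_from_IoU` above, assuming `numpy` is imported as `np`; the IoU values below are synthetic:

import numpy as np

ious = [0.9, 0.75, 0.6, 0.4, 0.2, 0.05]   # IoU of 6 synthetic detections
recall, thresholds = recall_from_IoU(ious, samples=5)
print(thresholds)   # [0.   0.25 0.5  0.75 1.  ]
print(recall)       # fraction of detections with IoU >= each threshold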

# =====================================================================
# Generic utility functions for object recognition
# ---------------------------------------------------------------------
core.py (project: dask_gdf, author: gpuopenanalytics)
def reset_index(self):
        """Reset index to range based
        """
        dfs = self.to_delayed()
        sizes = np.asarray(compute(*map(delayed(len), dfs)))
        prefixes = np.zeros_like(sizes)
        prefixes[1:] = np.cumsum(sizes[:-1])

        @delayed
        def fix_index(df, startpos):
            return df.set_index(np.arange(start=startpos,
                                          stop=startpos + len(df),
                                          dtype=np.intp))

        outdfs = [fix_index(df, startpos)
                  for df, startpos in zip(dfs, prefixes)]
        return from_delayed(outdfs)
layers.py (project: comprehend, author: Fenugreek)
def recoded_features(self, inputs, layer=-1, inverse_fn=ielu):

        hidden = self.get_hidden_values(inputs, store=True, layer=layer).eval()

        bench = self.get_reconstructed_input(np.zeros_like(hidden),
                                             layer=layer).eval().squeeze()
        if inverse_fn: ibench = inverse_fn(bench)

        results = []
        for h in range(hidden.shape[-1]):
            hidden_h = np.zeros_like(hidden)
            hidden_h[..., h] = hidden[..., h]
            feature = self.get_reconstructed_input(hidden_h, layer=layer).eval().squeeze()
            if inverse_fn:
                iresult = inverse_fn(feature) - ibench
                results.append(self.coders[0].coding(iresult).eval())
            else:
                results.append(feature - bench)

        return np.array(results), bench
_cluster.py (project: ananke, author: beiko-lab)
def zscore(x):
    """Computes the Z-score of a vector x. Removes the mean and divides by the
    standard deviation. Has a fallback if std is 0 to return all zeroes.

    Parameters
    ----------
    x: list of int
        Input time-series

    Returns
    -------
    z: list of float
        Z-score normalized time-series
    """
    mean = np.mean(x)
    sd = np.std(x)
    if sd == 0:
        z = np.zeros_like(x)
    else:
        z = (x - mean)/sd
    return z
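
Two example calls to `zscore` above (the function pulls in `numpy` from the source module; the inputs are arbitrary):

print(zscore([1, 2, 3, 4]))   # mean-centred, divided by the standard deviation
print(zscore([5, 5, 5, 5]))   # std is 0, so the fallback returns all zeros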
dqn_utils.py (project: deep-q-learning, author: alvinwan)
def _encode_observation(self, idx):
        end_idx   = idx + 1 # make noninclusive
        start_idx = end_idx - self.frame_history_len
        # this checks if we are using low-dimensional observations, such as RAM
        # state, in which case we just directly return the latest RAM.
        if len(self.obs.shape) == 2:
            return self.obs[end_idx-1]
        # if there weren't enough frames ever in the buffer for context
        if start_idx < 0 and self.num_in_buffer != self.size:
            start_idx = 0
        for idx in range(start_idx, end_idx - 1):
            if self.done[idx % self.size]:
                start_idx = idx + 1
        missing_context = self.frame_history_len - (end_idx - start_idx)
        # if zero padding is needed for missing context
        # or we are on the boundary of the buffer
        if start_idx < 0 or missing_context > 0:
            frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
            for idx in range(start_idx, end_idx):
                frames.append(self.obs[idx % self.size])
            return np.concatenate(frames, 2)
        else:
            # this optimization has the potential to save about 30% compute time \o/
            img_h, img_w = self.obs.shape[1], self.obs.shape[2]
            return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)
active.py (project: code-uai16, author: thanhan)
def do_batch_sampling(pp_unl, batch_size):
    """
    return batch_size number of items, sampled by uncertainty
    each with a label sampled from prob
    """
    n = pp_unl.shape[0]
    uncertain = np.abs(pp_unl[:,0] - 0.5)
    if n < batch_size:
        batch_size = n
    sam_weight = np.exp(20 * (1-uncertain))
    items = np.random.choice(n, batch_size, replace = False, p = sam_weight*1.0 / np.sum(sam_weight) )
    labels = np.zeros_like(items)

    for (i,item) in enumerate(items):
        if np.random.random() < pp_unl[item, 1]: 
            l = 1 
        else:
            l = 0
        labels[i] = l

    return (items, labels)
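
A usage sketch for `do_batch_sampling` above, assuming `numpy` is imported as `np`; the posterior matrix is made up and the output is random:

import numpy as np

# Posterior probabilities for 5 unlabeled items (columns: P(y=0), P(y=1)).
pp_unl = np.array([[0.90, 0.10],
                   [0.55, 0.45],
                   [0.50, 0.50],
                   [0.20, 0.80],
                   [0.05, 0.95]])

items, labels = do_batch_sampling(pp_unl, batch_size=3)
print(items)    # indices, biased toward rows whose P(y=0) is near 0.5
print(labels)   # one label per item, sampled from that item's P(y=1)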
inference.py (project: deep_srl, author: luheng)
def viterbi_decode(score, transition_params):
  """ Adapted from Tensorflow implementation.
  Decode the highest scoring sequence of tags outside of TensorFlow.
  This should only be used at test time.
  Args:
    score: A [seq_len, num_tags] matrix of unary potentials.
    transition_params: A [num_tags, num_tags] matrix of binary potentials.
  Returns:
    viterbi: A [seq_len] list of integers containing the highest scoring tag
        indices.
    viterbi_score: A float containing the score for the Viterbi sequence.
  """
  trellis = numpy.zeros_like(score)
  backpointers = numpy.zeros_like(score, dtype=numpy.int32)
  trellis[0] = score[0]
  for t in range(1, score.shape[0]):
    v = numpy.expand_dims(trellis[t - 1], 1) + transition_params
    trellis[t] = score[t] + numpy.max(v, 0)
    backpointers[t] = numpy.argmax(v, 0)
  viterbi = [numpy.argmax(trellis[-1])]
  for bp in reversed(backpointers[1:]):
    viterbi.append(bp[viterbi[-1]])
  viterbi.reverse()
  viterbi_score = numpy.max(trellis[-1])
  return viterbi, viterbi_score
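
A worked call to `viterbi_decode` above, assuming `numpy` is imported as in the source file; the scores and transition penalties are toy values:

import numpy

# 3 time steps, 2 tags; unary scores favour tag 0, then 1, then 0.
score = numpy.array([[1.0, 0.0],
                     [0.0, 1.0],
                     [1.0, 0.0]])
# A strong penalty for switching tags keeps the best path on tag 0 throughout.
transition_params = numpy.array([[ 0.0, -2.0],
                                 [-2.0,  0.0]])

tags, best = viterbi_decode(score, transition_params)
print(tags, best)   # best path [0, 0, 0] with total score 2.0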
adma.py (project: Optimizer-cotw, author: alkaya)
def __init__(self, syn0, syn1):
        # Hyperparameters
        self.__b1 = 0.9
        self.__b2 = 0.999
        self.__learing_rate = 0.001
        self.__epsilon = 1e-8

        # initialize momentum
        self.__m_syn0 = np.zeros_like(syn0)
        self.__m_syn1 = np.zeros_like(syn1)
        self.__v_syn0 = np.zeros_like(syn0)
        self.__v_syn1 = np.zeros_like(syn1)

        # get a copy of weight
        self.__syn0 = copy.deepcopy(syn0)
        self.__syn1 = copy.deepcopy(syn1)
direct_piv.py (project: pypiv, author: jr7)
def __init__(self, image_a, image_b, window_size=32, search_size=32, distance=16):
        """
        Initialization of the class.

        :param image_a: first image to be evaluated
        :param image_b: second image to be evaluated
        :param int window_size: size of the interrogation window on first image
        :param int search_size: size of the search window on second image
        :param int distance: distance between beginning of first interrogation window and second
        """
        image_a, image_b = self._check_images(image_a, image_b)
        self.grid_spec = GridSpec(image_a.shape, image_a.strides,
                                   window_size, search_size, distance)

        self._correlator = FFTCorrelator(window_size, search_size)
        self._set_images(image_a, image_b)

        self.u = np.zeros(self.grid_spec.get_grid_shape())
        self.v = np.zeros_like(self.u)
        self._grid_creator()
segment.py (project: iFruitFly, author: AdnanMuhib)
def segmentation(_image, typeOfFruit):
    #_denoisedImage = rank.median(_image, disk(2));
    #_elevationMap = sobel(_denoisedImage);
    #print(_image);
    _elevationMap = sobel(_image);
    #print(_image);
    _marker = np.zeros_like(_image);
    if (typeOfFruit == 'Counting'):
        _marker[_image < 1998] = 1;
        _marker[_image > 61541] = 2;
    elif (typeOfFruit == 'Temperature'):
        #print("Printing Image");
        #print(_image < 10);
        _marker[_image < 30] = 1;
        #print(_marker);
        _marker[_image > 150] = 2;
    #_marker = rank.gradient(_denoisedImage, disk(5)) < 10;
    #_marker = ndi.label(_marker)[0];
    #_elevationMap = rank.gradient(_denoisedImage, disk(2));
    _segmentation = watershed(_elevationMap, _marker);
    #print(_segmentation);
    return _segmentation;
imageSegmentor_v3.py (project: iFruitFly, author: AdnanMuhib)
def segmentation(_image, typeOfFruit):
    #_denoisedImage = rank.median(_image, disk(2));
    #_elevationMap = sobel(_denoisedImage);
    #print(_image);
    _elevationMap = sobel(_image);
    #print(_image);
    _marker = np.zeros_like(_image);
    if (typeOfFruit == 'Counting'):
        _marker[_image < 1998] = 1;
        _marker[_image > 61541] = 2;
    elif (typeOfFruit == 'Temperature'):
        #print("Printing Image");        
        #print(_image < 10);
        _marker[_image < 30] = 1;
        #print(_marker);
        _marker[_image > 150] = 2;
    #_marker = rank.gradient(_denoisedImage, disk(5)) < 10;
    #_marker = ndi.label(_marker)[0];
    #_elevationMap = rank.gradient(_denoisedImage, disk(2));
    _segmentation = watershed(_elevationMap, _marker);
    #print(_segmentation);
    return _segmentation;
iFruitFly_Testing_weka.py (project: iFruitFly, author: AdnanMuhib)
def dbFun(_x,_original_vals, f):
    db = DBSCAN(eps=0.3, min_samples=20).fit(_x)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True

    labels = db.labels_
    #print(labels)
    # discount the noise label (-1) if present
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    #gettingCharacteristics(_x, core_samples_mask, labels, n_clusters_,
    #_original_vals)
    print("Wait plotting clusters.....")
    plotCluster(_x, labels, core_samples_mask, n_clusters_, f)
    return

##############################################################################################
# Plotting the cluster after the result of DBSCAN
proc.py (project: PyMDNet, author: HungWei-Andy)
def overlap_ratio(boxes1, boxes2):
  # find intersection bbox
  x_int_bot = np.maximum(boxes1[:, 0], boxes2[0])
  x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[0] + boxes2[2])
  y_int_bot = np.maximum(boxes1[:, 1], boxes2[1])
  y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[1] + boxes2[3])

  # find intersection area
  dx = x_int_top - x_int_bot
  dy = y_int_top - y_int_bot
  area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))

  # find union
  area_union = boxes1[:,2] * boxes1[:,3] + boxes2[2] * boxes2[3] - area_int

  # find overlap ratio
  ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
  return ratio
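
A quick check of `overlap_ratio` above, assuming `numpy` is imported as `np`; the boxes use the [x, y, w, h] convention implied by the code and are illustrative only:

import numpy as np

boxes = np.array([[ 0.,  0., 10., 10.],
                  [ 5.,  5., 10., 10.],
                  [20., 20.,  5.,  5.]])
target = np.array([0., 0., 10., 10.])

print(overlap_ratio(boxes, target))   # [1.0, ~0.14, 0.0]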


###########################################################################
#                          overlap_ratio of two bboxes                    #
###########################################################################
proc.py (project: PyMDNet, author: HungWei-Andy)
def overlap_ratio_pair(boxes1, boxes2):
  # find intersection bbox
  x_int_bot = np.maximum(boxes1[:, 0], boxes2[:, 0])
  x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[:, 0] + boxes2[:, 2])
  y_int_bot = np.maximum(boxes1[:, 1], boxes2[:, 1])
  y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[:, 1] + boxes2[:, 3])

  # find intersection area
  dx = x_int_top - x_int_bot
  dy = y_int_top - y_int_bot
  area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))

  # find union
  area_union = boxes1[:,2] * boxes1[:,3] + boxes2[:, 2] * boxes2[:, 3] - area_int

  # find overlap ratio
  ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
  return ratio
sphereElectrostatic_example.py (project: em_examples, author: geoscixyz)
def get_Conductivity(XYZ, sig0, sig1, R):
    """
    Define the conductivity for each point of the space
    """
    x, y, z = XYZ[:, 0], XYZ[:, 1], XYZ[:, 2]
    r_view = r(x, y, z)

    ind0 = (r_view > R)
    ind1 = (r_view <= R)

    assert (ind0 + ind1).all(), 'Some indices not included'

    Sigma = np.zeros_like(x)

    Sigma[ind0] = sig0
    Sigma[ind1] = sig1

    return Sigma
TDEMPlanewave.py (project: em_examples, author: geoscixyz)
def E_field_from_SheetCurruent(XYZ, srcLoc, sig, t, E0=1., orientation='X', kappa=0., epsr=1.):
    """
        Computing Analytic Electric fields from Plane wave in a Wholespace
        TODO:
            Add description of parameters
    """

    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
    # Check
    if XYZ.shape[0] > 1 and t.shape[0] > 1:
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")

    mu = mu_0*(1+kappa)

    if orientation == "X":
        z = XYZ[:, 2]
        bunja = -E0*(mu*sig)**0.5 * z * np.exp(-(mu*sig*z**2) / (4*t))
        bunmo = 2 * np.pi**0.5 * t**1.5
        Ex = bunja / bunmo
        Ey = np.zeros_like(z)
        Ez = np.zeros_like(z)
        return Ex, Ey, Ez
    else:
        raise NotImplementedError()
TDEMPlanewave.py (project: em_examples, author: geoscixyz)
def H_field_from_SheetCurruent(XYZ, srcLoc, sig, t, E0=1., orientation='X', kappa=0., epsr=1.):
    """
        Plane wave propagating downward (negative z (depth))
    """

    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
    # Check
    if XYZ.shape[0] > 1 and t.shape[0] > 1:
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")

    mu = mu_0*(1+kappa)
    if orientation == "X":
        z = XYZ[:, 2]
        Hx = np.zeros_like(z)
        Hy = E0 * np.sqrt(sig / (np.pi*mu*t))*np.exp(-(mu*sig*z**2) / (4*t))
        Hz = np.zeros_like(z)
        return Hx, Hy, Hz
    else:
        raise NotImplementedError()
MT.py (project: em_examples, author: geoscixyz)
def appres(F, H, sig, chg, taux, c, mu, eps, n):

    Res = np.zeros_like(F)
    Phase = np.zeros_like(F)
    App_ImpZ= np.zeros_like(F, dtype='complex_')

    for i in range(0, len(F)):

        UD, EH, Z , K = Propagate(F[i], H, sig, chg, taux, c, mu, eps, n)

        App_ImpZ[i] = EH[0, 1]/EH[1, 1]

        Res[i] = np.abs(App_ImpZ[i])**2./(mu_0*omega(F[i]))
        Phase[i] = np.angle(App_ImpZ[i], deg = True)

    return Res, Phase

# Evaluate Up, Down components, E and H field, for a frequency range,
# a discretized depth range and a time range (use to calculate envelope)
MT.py (project: em_examples, author: geoscixyz)
def run(n, plotIt=True):
    # something to make a plot

    F = frange(-5., 5., 20)
    H = thick(50., 100., n)
    sign = sig(-5., 0., n)
    mun = mu(1., 2., n)
    epsn = eps(1., 9., n)
    chg = np.zeros_like(sign)
    taux = np.zeros_like(sign)
    c = np.zeros_like(sign)

    Res, Phase = appres(F, H, sign, chg, taux, c, mun, epsn, n)

    if plotIt:

        PlotAppRes(F, H, sign, chg, taux, c, mun, epsn, n, fenvelope=1000., PlotEnvelope=True)

    return Res, Phase
FDEMPlanewave.py (project: em_examples, author: geoscixyz)
def J_field_from_SheetCurruent(XYZ, srcLoc, sig, f, E0=1., orientation='X', kappa=0., epsr=1., t=0.):
    """
        Plane wave propagating downward (negative z (depth))
    """

    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
    # Check
    if XYZ.shape[0] > 1 and f.shape[0] > 1:
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")

    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr
    sig_hat = sig + 1j*omega(f)*epsilon
    k  = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig )

    if orientation == "X":
        z = XYZ[:,2]
        Jx = sig*E0*np.exp(1j*(k*(z-srcLoc)+omega(f)*t))
        Jy = np.zeros_like(z)
        Jz = np.zeros_like(z)
        return Jx, Jy, Jz
    else:
        raise NotImplementedError()
FDEMPlanewave.py (project: em_examples, author: geoscixyz)
def H_field_from_SheetCurruent(XYZ, srcLoc, sig, f, E0=1., orientation='X', kappa=0., epsr=1., t=0.):
    """
        Plane wave propagating downward (negative z (depth))
    """

    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
    # Check
    if XYZ.shape[0] > 1 and f.shape[0] > 1:
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")

    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr
    sig_hat = sig + 1j*omega(f)*epsilon
    k  = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig )
    Z = omega(f)*mu/k
    if orientation == "X":
        z = XYZ[:,2]
        Hx = np.zeros_like(z)
        Hy = E0/Z*np.exp(1j*(k*(z-srcLoc)+omega(f)*t))
        Hz = np.zeros_like(z)
        return Hx, Hy, Hz
    else:
        raise NotImplementedError()
FDEMPlanewave.py (project: em_examples, author: geoscixyz)
def B_field_from_SheetCurruent(XYZ, srcLoc, sig, f, E0=1., orientation='X', kappa=0., epsr=1., t=0.):
    """
        Plane wave propagating downward (negative z (depth))
    """

    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
    # Check
    if XYZ.shape[0] > 1 and f.shape[0] > 1:
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")

    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr
    sig_hat = sig + 1j*omega(f)*epsilon
    k  = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig )
    Z = omega(f)*mu/k
    if orientation == "X":
        z = XYZ[:,2]
        Bx = mu*np.zeros_like(z)
        By = mu*E0/Z*np.exp(1j*(k*(z-srcLoc)+omega(f)*t))
        Bz = mu*np.zeros_like(z)
        return Bx, By, Bz
    else:
        raise NotImplementedError()
tests2.py (project: OpenSAPM, author: pathfinder14)
def stup():
    x0 = x[0]
    init_cond = np.zeros_like(X)
    for i in range(0, x_nods_quantity):
        if 0 <= x0 < 0.3:
            init_cond[i] = 0
            x0 += h
        elif(0.3 <= x0 <= 0.7):
            init_cond[i] = 1
            x0 += h
        else:
            init_cond[i] = 0
            x0 += h
    return init_cond
#stupenka end
#different initial conditions end

# different transfer velocity

