Python tan() usage examples (source code)
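
Nearly all of the snippets below call numpy's np.tan (a few use math.tan), which expects its argument in radians; angles given in degrees are therefore converted with np.radians/np.deg2rad or a factor of np.pi/180. A minimal illustration:

import numpy as np

print(np.tan(np.pi / 4))         # ~1.0
print(np.tan(np.radians(45.0)))  # same value, starting from degrees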

coreglib.py (project: demcoreg, author: dshean)
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    bin_range = (0, 360)
    a = (np.arange(*bin_range))
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size':8})
    return f 
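
nuth_func itself is not included in this excerpt. Judging from the plot label (y = a*cos(b - x) + c, with aspect x in degrees), it is presumably the Nuth and Kääb (2011) cosine fit, roughly as sketched below; the actual implementation in demcoreg may differ in detail.

import numpy as np

def nuth_func(x, a, b, c):
    # Hypothetical reconstruction based on the label string above: x is aspect in degrees.
    return a * np.cos(np.deg2rad(b - x)) + c
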

# Function copied from openPIV pyprocess
volume_raycasting_example.py (project: ModernGL-Volume-Raycasting-Example, author: ulricheck)
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.camera_center += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.camera_center - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.camera_fov * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.camera_center = self.camera_center + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
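
In the relative branch, the visible width of the scene at the look-at point is 2*dist*tan(fov/2); dividing by the widget width in pixels turns a one-pixel mouse move into a world-space distance. A quick numeric check with made-up values:

import numpy as np

dist, fov_deg, width_px = 10.0, 60.0, 800
xDist = dist * 2.0 * np.tan(0.5 * np.radians(fov_deg))  # visible width at the center, ~11.55
xScale = xDist / width_px                               # world units per pixel, ~0.0144
print(xDist, xScale)
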
GLViewWidget.py (project: NeoAnalysis, author: neoanalysis)
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X0
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
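
Here r is the half-width of the near clipping plane, nearClip*tan(fov/2) with the field of view measured horizontally, and t is the matching half-height scaled by the viewport aspect ratio; the region arithmetic then maps a sub-rectangle of the viewport onto that plane before QMatrix4x4.frustum builds the projection. A small numeric illustration with assumed values:

import numpy as np

dist, fov, w, h = 10.0, 60.0, 800, 600
nearClip = dist * 0.001
r = nearClip * np.tan(np.radians(fov) * 0.5)  # half-width of the near plane, ~0.0058
t = r * h / w                                 # half-height, ~0.0043
print(nearClip, r, t)
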
GLViewWidget.py (project: NeoAnalysis, author: neoanalysis)
def pan(self, dx, dy, dz, relative=False):
        """
        Moves the center (look-at) position while holding the camera in place. 

        If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
        in the global xy plane and orthogonal to x, and z points in the global z
        direction. Distances are scaled roughly such that a value of 1.0 moves
        by one pixel on screen.

        """
        if not relative:
            self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
        else:
            cPos = self.cameraPosition()
            cVec = self.opts['center'] - cPos
            dist = cVec.length()  ## distance from camera to center
            xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)  ## approx. width of view at distance of center point
            xScale = xDist / self.width()
            zVec = QtGui.QVector3D(0,0,1)
            xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
            yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
            self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
        self.update()
functions.py (project: NeoAnalysis, author: neoanalysis)
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
    """
    Construct a path outlining an arrow with the given dimensions.
    The arrow points in the -x direction with tip positioned at 0,0.
    If *tipAngle* is supplied (in degrees), it overrides *headWidth*.
    If *tailLen* is None, no tail will be drawn.
    """
    headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
    path = QtGui.QPainterPath()
    path.moveTo(0,0)
    path.lineTo(headLen, -headWidth)
    if tailLen is None:
        innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, 0)
    else:
        tailWidth *= 0.5
        innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, -tailWidth)
        path.lineTo(headLen + tailLen, -tailWidth)
        path.lineTo(headLen + tailLen, tailWidth)
        path.lineTo(innerY, tailWidth)
    path.lineTo(headLen, headWidth)
    path.lineTo(0,0)
    return path
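
The only trigonometry here is the head half-width, headLen*tan(tipAngle/2); with the defaults headLen=20 and tipAngle=20 degrees that is about 3.5 path units:

import numpy as np

headLen, tipAngle = 20, 20
headWidth = headLen * np.tan(np.radians(tipAngle) * 0.5)
print(headWidth)  # ~3.53
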
GLViewWidget.py (project: NeoAnalysis, author: neoanalysis)
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X0
        if region is None:
            region = (0, 0, self.width(), self.height())

        x0, y0, w, h = self.getViewport()
        dist = self.opts['distance']
        fov = self.opts['fov']
        nearClip = dist * 0.001
        farClip = dist * 1000.

        r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
        t = r * h / w

        # convert screen coordinates (region) to normalized device coordinates
        # Xnd = (Xw - X0) * 2/width - 1
        ## Note that X0 and width in these equations must be the values used in viewport
        left  = r * ((region[0]-x0) * (2.0/w) - 1)
        right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
        bottom = t * ((region[1]-y0) * (2.0/h) - 1)
        top    = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)

        tr = QtGui.QMatrix4x4()
        tr.frustum(left, right, bottom, top, nearClip, farClip)
        return tr
functions.py (project: NeoAnalysis, author: neoanalysis)
def makeArrowPath(headLen=20, tipAngle=20, tailLen=20, tailWidth=3, baseAngle=0):
    """
    Construct a path outlining an arrow with the given dimensions.
    The arrow points in the -x direction with tip positioned at 0,0.
    If *tipAngle* is supplied (in degrees), it overrides *headWidth*.
    If *tailLen* is None, no tail will be drawn.
    """
    headWidth = headLen * np.tan(tipAngle * 0.5 * np.pi/180.)
    path = QtGui.QPainterPath()
    path.moveTo(0,0)
    path.lineTo(headLen, -headWidth)
    if tailLen is None:
        innerY = headLen - headWidth * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, 0)
    else:
        tailWidth *= 0.5
        innerY = headLen - (headWidth-tailWidth) * np.tan(baseAngle*np.pi/180.)
        path.lineTo(innerY, -tailWidth)
        path.lineTo(headLen + tailLen, -tailWidth)
        path.lineTo(headLen + tailLen, tailWidth)
        path.lineTo(innerY, tailWidth)
    path.lineTo(headLen, headWidth)
    path.lineTo(0,0)
    return path
structs.py (project: planetplanet, author: rodluger)
def ecc(self, val):
        '''
        We need to update the time of pericenter passage whenever the
        eccentricity, longitude of pericenter, period, or time of transit
        changes. See the appendix in Shields et al. (2016).

        '''

        self._ecc = val
        fi = (3 * np.pi / 2.) - self._w
        self.tperi0 = self.t0 + (self.per * np.sqrt(1. - self.ecc * self.ecc) /
                     (2. * np.pi) * (self.ecc * np.sin(fi) /
                     (1. + self.ecc * np.cos(fi)) - 2.
                     / np.sqrt(1. - self.ecc * self.ecc)
                     * np.arctan2(np.sqrt(1. - self.ecc * self.ecc)
                     * np.tan(fi/2.), 1. + self.ecc)))
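
The long expression is the closed-form time offset between transit (true anomaly f = 3*pi/2 - w) and pericenter passage. As a hedged cross-check, the textbook route (true anomaly to eccentric anomaly, then Kepler's equation) gives the same answer up to a whole number of orbital periods, the only freedom left by the arctangent branch; the orbital elements below are made up for illustration.

import numpy as np

e, w, per, t0 = 0.3, 0.5, 10.0, 0.0
f = 1.5 * np.pi - w                                   # true anomaly at transit, as above

# Textbook route: eccentric anomaly E, mean anomaly M, time of pericenter passage.
E = 2.0 * np.arctan2(np.sqrt(1 - e) * np.sin(f / 2), np.sqrt(1 + e) * np.cos(f / 2))
M = E - e * np.sin(E)
tperi_check = t0 - per * M / (2 * np.pi)

# Closed form used in the snippet above.
tperi = t0 + (per * np.sqrt(1 - e * e) / (2 * np.pi)
              * (e * np.sin(f) / (1 + e * np.cos(f))
                 - 2 / np.sqrt(1 - e * e)
                 * np.arctan2(np.sqrt(1 - e * e) * np.tan(f / 2), 1 + e)))

n_periods = (tperi - tperi_check) / per
print(np.isclose(n_periods, round(n_periods)))        # True
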
structure_base.py (project: modesolverpy, author: jtambasco)
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
                              x_top_right, x_bot_left, n_material):
        angle = np.radians(angle)
        trap_len = (y_top_right - y_bot_left) / np.tan(angle)
        num_x_iterations = round(trap_len/self.x_step)
        y_per_iteration = round(self.y_pts / num_x_iterations)

        lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
        rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)

        for i, _ in enumerate(xy_mask):
            xy_mask[i][:lhs_x_start_index] = False
            xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True

            if i % y_per_iteration == 0:
                lhs_x_start_index -= 1
                rhs_x_stop_index += 1

        self.n[xy_mask] = n_material
        return self.n
rmatrices.py (project: accpy, author: kramerfelix)
def edge(ev):
    rho = ev[2]     # bending radius
    phi = ev[3]     # edge angle
    # distance between pole shoes g * pole shoe form factor K
    gK = ev[5]      # K is ~0.45 for rectangular and ~0.7 for Rogowski
    R = eye(6)
    tanphi = tan(phi)
    R[1, 0] = tanphi/rho
    if gK != 0:
        cosphi = cos(phi)
        sinphi = sin(phi)
        # Hinterberger 4.79 (exact)
        R[3, 2] = -(tanphi-gK/rho*(1+(sinphi)**2)/(cosphi**3))/rho
        # Madx and Chao:
        # R[3,2] = -(tan(phi-gK/rho*(1+sinphi**2)/cosphi))/rho
    else:
        R[3, 2] = -tanphi/rho
    return R
astro.py (project: nmmn, author: rsnemmen)
def arcsec2pc(d=15.,a=1.):
    """
Given the input angular size and distance to the object, computes
the corresponding linear size in pc. 

:param d: distance in Mpc
:param a: angular size in arcsec
:returns: linear size in pc
    """

    # convert arcsec to radians
    a=a*4.848e-6    
    # convert distance to pc instead of Mpc
    d=d*1e6

    return d*numpy.tan(a)
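
For angles this small, d*tan(a) is essentially d*a, so 1 arcsec at 15 Mpc corresponds to roughly 73 pc. Usage, with the function above in scope:

print(arcsec2pc(d=15., a=1.))  # ~72.7 pc
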
coords.py (project: pyberny, author: azag0)
def eval(self, coords, grad=False):
        v1 = (coords[self.i]-coords[self.j])/bohr
        v2 = (coords[self.k]-coords[self.j])/bohr
        dot_product = np.dot(v1, v2)/(norm(v1)*norm(v2))
        if dot_product < -1:
            dot_product = -1
        elif dot_product > 1:
            dot_product = 1
        phi = np.arccos(dot_product)
        if not grad:
            return phi
        if abs(phi) > pi-1e-6:
            grad = [
                (pi-phi)/(2*norm(v1)**2)*v1,
                (1/norm(v1)-1/norm(v2))*(pi-phi)/(2*norm(v1))*v1,
                (pi-phi)/(2*norm(v2)**2)*v2
            ]
        else:
            grad = [
                1/np.tan(phi)*v1/norm(v1)**2-v2/(norm(v1)*norm(v2)*np.sin(phi)),
                (v1+v2)/(norm(v1)*norm(v2)*np.sin(phi)) -
                1/np.tan(phi)*(v1/norm(v1)**2+v2/norm(v2)**2),
                1/np.tan(phi)*v2/norm(v2)**2-v1/(norm(v1)*norm(v2)*np.sin(phi))
            ]
        return phi, grad
genericCameraMatrix.py (project: imgProcessor, author: radjkarl)
def genericCameraMatrix(shape, angularField=60):
    '''
    Return a generic camera matrix
    [[fx, 0, cx],
    [ 0, fy, cy],
    [ 0, 0,   1]]
    for a given image shape
    '''
    # http://nghiaho.com/?page_id=576
    # assume that the optical centre is in the middle:
    cy = int(shape[0] / 2)
    cx = int(shape[1] / 2)

    # assume that the FOV is 60 DEG (webcam)
    fx = fy = cx / np.tan(angularField / 2 * np.pi /
                          180)  # camera focal length
    # see
    # http://docs.opencv.org/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
    return np.array([[fx, 0, cx],
                     [0, fy, cy],
                     [0, 0, 1]
                     ], dtype=np.float32)
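
For a 640x480 image with the default 60-degree field of view, the principal point lands at (320, 240) and the focal length at 320/tan(30 deg), roughly 554 px. Usage with the function above:

K = genericCameraMatrix((480, 640))  # shape is (rows, cols)
print(K)
# approximately:
# [[554.3   0.  320. ]
#  [  0.  554.3 240. ]
#  [  0.    0.    1. ]]
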
agent_mjc.py (project: visual_mpc, author: febert)
def mujoco_to_imagespace(self, mujoco_coord, numpix=64, truncate=False):
        """
        convert from Mujoco coordinates to numpix x numpix image space:
        :param numpix: number of pixels of square image
        :param mujoco_coord:
        :return: pixel_coord
        """
        viewer_distance = .75  # distance from camera to the viewing plane
        window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
        pixelheight = window_height / numpix  # height of one pixel
        pixelwidth = pixelheight
        window_width = pixelwidth * numpix
        middle_pixel = numpix / 2
        pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) /
                              pixelwidth + np.array([middle_pixel, middle_pixel]))
        pixel_coord = pixel_coord.astype(int)

        return pixel_coord
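
With the hard-coded 75-degree field of view and 0.75 viewing distance, the visible window is 2*tan(37.5 deg)*0.75, about 1.15 Mujoco units across, i.e. roughly 0.018 units per pixel when numpix=64:

import numpy as np

window_height = 2 * np.tan(np.radians(75 / 2)) * 0.75  # ~1.151
print(window_height, window_height / 64)               # ~1.151  ~0.0180
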
JensenOpenMDAO.py (project: Jensen3D, author: byuflowlab)
def __init__(self, nTurbs):
        super(effectiveVelocity, self).__init__()

        self.fd_options['form'] = 'central'
        self.fd_options['step_size'] = 1.0e-6
        self.fd_options['step_type'] = 'relative'

        self.nTurbs = nTurbs
        self.add_param('xr', val=np.zeros(nTurbs))
        self.add_param('r', val=np.zeros(nTurbs))
        self.add_param('alpha', val=np.tan(0.1))
        self.add_param('windSpeed', val=0.0)
        self.add_param('a', val=1./3.)
        self.add_param('overlap', val=np.empty([nTurbs, nTurbs]))

        self.add_output('hubVelocity', val=np.zeros(nTurbs))
JensenOpenMDAOconnect.py (project: Jensen3D, author: byuflowlab)
def conferenceWakeOverlap(X, Y, R):

    n = np.size(X)

    # theta = np.zeros((n, n), dtype=np.float)        # angle of wake from fulcrum
    f_theta = np.zeros((n, n), dtype=float)      # smoothing values (cosine factors)

    for i in range(0, n):
        for j in range(0, n):
            if X[i] < X[j]:
                z = R/np.tan(0.34906585)
                # print z
                theta = np.arctan((Y[j] - Y[i]) / (X[j] - X[i] + z))
                # print 'theta =', theta
                if -0.34906585 < theta < 0.34906585:
                    f_theta[i][j] = (1 + np.cos(9*theta))/2
                    # print f_theta

    # print z
    # print f_theta
    return f_theta
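
The magic number 0.34906585 is simply 20 degrees in radians, so z = R/tan(20 deg) is the distance from the upstream turbine back to the apex of its assumed 20-degree wake cone; the tuned variant below exposes this angle as boundAngle:

import numpy as np

print(np.degrees(0.34906585))    # ~20.0
print(1.0 / np.tan(0.34906585))  # z/R, ~2.75
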
JensenOpenMDAOconnect.py (project: Jensen3D, author: byuflowlab)
def conferenceWakeOverlap_tune(X, Y, R, boundAngle):

    n = np.size(X)
    boundAngle = boundAngle*np.pi/180.0
    # theta = np.zeros((n, n), dtype=np.float)      # angle of wake from fulcrum
    f_theta = np.zeros((n, n), dtype=float)      # smoothing values (cosine factors)
    q = np.pi/boundAngle                            # factor inside the cos term of the smooth Jensen (see Jensen1983 eq.(3))
    # print 'boundAngle = %s' %boundAngle, 'q = %s' %q
    for i in range(0, n):
        for j in range(0, n):
            if X[i] < X[j]:
                # z = R/tan(0.34906585)
                z = R/np.tan(boundAngle)               # distance from fulcrum to wake producing turbine
                # print z
                theta = np.arctan((Y[j] - Y[i]) / (X[j] - X[i] + z))
                # print 'theta =', theta

                if -boundAngle < theta < boundAngle:

                    f_theta[i][j] = (1. + np.cos(q*theta))/2.
                    # print f_theta

    # print z
    # print f_theta
    return f_theta
JensenOpenMDAOconnect.py (project: Jensen3D, author: byuflowlab)
def get_cosine_factor_original(X, Y, R0, bound_angle=20.0):

    n = np.size(X)
    bound_angle = bound_angle*np.pi/180.0
    # theta = np.zeros((n, n), dtype=np.float)      # angle of wake from fulcrum
    f_theta = np.zeros((n, n), dtype=float)      # smoothing values (cosine factors)
    q = np.pi/bound_angle                           # factor inside the cos term of the smooth Jensen (see Jensen1983 eq.(3))

    for i in range(0, n):
        for j in range(0, n):
            if X[i] < X[j]:
                z = R0/np.tan(bound_angle)               # distance from fulcrum to wake producing turbine

                theta = np.arctan((Y[j] - Y[i]) / (X[j] - X[i] + z))

                if -bound_angle < theta < bound_angle:

                    f_theta[i][j] = (1. + np.cos(q*theta))/2.

    return f_theta
correction_train_flow.py (project: lsdc, author: febert)
def mujoco_to_imagespace(mujoco_coord, numpix=64):
    """
    convert from Mujoco coordinates to numpix x numpix image space:
    :param numpix: number of pixels of square image
    :param mujoco_coord:
    :return: pixel_coord
    """
    viewer_distance = .75  # distance from camera to the viewing plane
    window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
    pixelheight = window_height / numpix  # height of one pixel
    pixelwidth = pixelheight
    window_width = pixelwidth * numpix
    middle_pixel = numpix / 2
    pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) /
                          pixelwidth + np.array([middle_pixel, middle_pixel]))
    pixel_coord = pixel_coord.astype(int)
    return pixel_coord
VolIntegral.py (project: PyFrac, author: GeoEnergyLab-EPFL)
def TipAsym_UniversalW_delt_Res(w, *args):
    """Residual function whose zero gives the general asymptote."""

    (dist, Kprime, Eprime, muPrime, Cbar, Vel) = args

    Kh = Kprime * dist ** 0.5 / (Eprime * w)
    Ch = 2 * Cbar * dist ** 0.5 / (Vel ** 0.5 * w)
    sh = muPrime * Vel * dist ** 2 / (Eprime * w ** 3)

    g0 = f(Kh, 0.9911799823 * Ch, 10.392304845)
    delt = 10.392304845 * (1 + 0.9911799823 * Ch) * g0

    C1 = 4 * (1 - 2 * delt) / (delt * (1 - delt)) * np.tan(np.pi * delt)
    C2 = 16 * (1 - 3 * delt) / (3 * delt * (2 - 3 * delt)) * np.tan(3 * np.pi * delt / 2)
    b = C2 / C1

    return sh - f(Kh, Ch * b, C1)
TipInversion.py (project: PyFrac, author: GeoEnergyLab-EPFL)
def TipAsym_Universal_delt_Res(dist, *args):
    """More precise residual function to be minimized to find the root of the universal tip asymptote (see Dontsov and Peirce)."""

    (wEltRibbon, Kprime, Eprime, muPrime, Cbar, DistLstTSEltRibbon, dt) = args

    Vel = (dist - DistLstTSEltRibbon) / dt
    Kh = Kprime * dist ** 0.5 / (Eprime * wEltRibbon)
    Ch = 2 * Cbar * dist ** 0.5 / (Vel ** 0.5 * wEltRibbon)
    sh = muPrime * Vel * dist ** 2 / (Eprime * wEltRibbon ** 3)

    g0 = f(Kh, 0.9911799823 * Ch, 10.392304845)
    delt = 10.392304845 * (1 + 0.9911799823 * Ch) * g0

    C1 = 4 * (1 - 2 * delt) / (delt * (1 - delt)) * np.tan(math.pi * delt)
    C2 = 16 * (1 - 3 * delt) / (3 * delt * (2 - 3 * delt)) * np.tan(3 * math.pi * delt / 2)
    b = C2 / C1

    return sh - f(Kh, Ch * b, C1)


# ----------------------------------------------------------------------------------------------------------------------
camera_utils.py (project: vsi_common, author: VisionSystemsInc)
def construct_K(image_size, focal_len=None, fov_degrees=None, fov_radians=None):
  """ create calibration matrix K using the image size and focal length or field of view angle
  Assumes 0 skew and principal point at center of image
  Note that image_size = (width, height)
  Note that fov is assumed to be measured horizontally
  """
  if not np.sum([focal_len is not None, fov_degrees is not None, fov_radians is not None]) == 1:
    raise Exception('Specify exactly one of [focal_len, fov_degrees, fov_radians]')

  if fov_degrees is not None:
    fov_radians = np.deg2rad(fov_degrees)
  if fov_radians is not None:
    focal_len = image_size[0] / (2.0 * np.tan(fov_radians/2.0))

  K = np.array([[focal_len, 0, image_size[0]/2.0], [0, focal_len, image_size[1]/2.0], [0, 0, 1]])
  return K
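
With a 90-degree horizontal field of view the focal length comes out as half the image width, which makes a convenient sanity check. Usage with the function above:

K = construct_K((640, 480), fov_degrees=90)
print(K)
# approximately:
# [[320.   0. 320.]
#  [  0. 320. 240.]
#  [  0.   0.   1.]]
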
01_Brachistochrone_Problem.py (project: OpenGoddard, author: istellartech)
def inequality(prob, obj):
    x = prob.states_all_section(0)
    y = prob.states_all_section(1)
    v = prob.states_all_section(2)
    theta = prob.controls_all_section(0)
    tf = prob.time_final(-1)

    result = Condition()

    # lower bounds
    result.lower_bound(tf, 0.1)
    result.lower_bound(y, 0)
    result.lower_bound(theta, 0)

    # upper bounds
    # result.upper_bound(theta, np.pi/2)
    # result.upper_bound(y, x * np.tan(obj.theta0) + obj.h)

    return result()
dcm.py (project: pyins, author: nmayorov)
def _dtheta_from_omega_matrix(theta):
    norm = np.linalg.norm(theta, axis=1)
    k = np.empty_like(norm)

    mask = norm > 1e-4
    nm = norm[mask]
    k[mask] = (1 - 0.5 * nm / np.tan(0.5 * nm)) / nm**2
    mask = ~mask
    nm = norm[mask]
    k[mask] = 1/12 + 1/720 * nm**2

    A = np.empty((norm.shape[0], 3, 3))
    skew = _skew_matrix_array(theta)
    A[:] = np.identity(3)
    A[:] += 0.5 * skew
    A[:] += k[:, None, None] * util.mm_prod(skew, skew)

    return A
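
The 1/12 + nm**2/720 branch is the small-angle Taylor expansion of (1 - 0.5*x/tan(0.5*x))/x**2, used to avoid the 0/0 indeterminacy below the 1e-4 threshold; the two expressions agree closely right around that threshold:

import numpy as np

x = 1e-4
exact = (1 - 0.5 * x / np.tan(0.5 * x)) / x**2
series = 1 / 12 + 1 / 720 * x**2
print(exact, series)  # both ~0.0833333...
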
SourceUndulatorPlane.py (project: und_Sophie_2016, author: SophieTh)
def describe_ring(self,distance,harmonic_number,ring_number,t=None):
        if ring_number==0 :
            X=np.array([0.0])
            Y=X
        else :
            if t is None :
                t=np.linspace(0.0,0.5*np.pi,101)
            n=harmonic_number
            theta=self.angle_ring_number(harmonic_number=n, ring_number=ring_number)
            if distance==None :
                R=theta
            else :
                R=distance*np.tan(theta)
            X=R*np.cos(t)
            Y=R*np.sin(t)
        return X,Y

    # in photon /sec /1% /mrad*mrad
camera.py (project: car-detection, author: mmetcalfe)
def openGLPerspectiveMatrix(fovy, aspect, near, far):
    # print 'fovy:', fovy
    # print 'aspect:', aspect
    # print 'near:', near
    # print 'far:', far

    f = 1.0 / np.tan(fovy/2.0)

    # print 'f:', f

    return np.matrix([
    [f/aspect, 0, 0, 0],
    [0, f, 0, 0],
    [0, 0, (far+near)/(near-far), (2.0*near*far)/(near-far)],
    [0, 0, -1, 0]
    ], np.float32)
geometry.py (project: theia, author: bandang0)
def basis(a):
    '''Returns two vectors u and v such that (a, u, v) is a direct orthonormal basis (a is assumed to be a unit vector).

    '''
    ez = np.array([0., 0., 1.], dtype = np.float64)

    if np.abs(np.dot(a, ez)) == 1.:
        u = np.dot(a, ez) * np.array([1., 0., 0.], dtype = np.float64)
        v = np.array([0., 1., 0.], dtype = np.float64)
        return u,v
    else:
        theta = np.arccos(np.dot(a, ez))

        u = ez/np.sin(theta) - a/np.tan(theta)
        v = np.cross(a, u)
        u = u/np.linalg.norm(u)
        v = v/np.linalg.norm(v)
        return u, v
pathplan.py (project: rmp, author: iamprem)
def nh_steer(self, q_nearest, q_rand, epsilon):
        """
        For a car-like robot that takes two control inputs (u_speed, u_phi).
        All possible combinations of the control inputs are generated and used to find the closest q_new to q_rand.
        :param q_nearest:
        :param q_rand:
        :param epsilon:
        :return:
        """
        L = 20.0  # length between the midpoints of the front and rear axles of the car-like robot
        u_speed, u_phi = [-1.0, 1.0], [-math.pi/4, 0, math.pi/4]
        controls = list(itertools.product(u_speed, u_phi))
        # euler = lambda t_i, q, u_s, u_p, L: (u_s*math.cos(q[2]), u_s*math.sin(q[2]), u_s/L*math.tan(u_p))
        result = []
        ctrls_path = {c: [] for c in controls}
        for ctrl in controls:
            q_new = q_nearest
            for t_i in range(epsilon):  # h is assumed to be 1 here for euler integration
                q_new = tuple(map(add, q_new, self.euler(t_i, q_new, ctrl[0], ctrl[1], L)))
                ctrls_path[ctrl].append(q_new)
            result.append((ctrl[0], ctrl[1], q_new))
        q_news = [x[2] for x in result]
        _, _, idx = self.nearest_neighbour(q_rand, np.array(q_news))
        return result[idx], ctrls_path
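
self.euler is not shown in this excerpt; the commented-out lambda suggests it returns the kinematic bicycle-model derivatives (x_dot, y_dot, theta_dot) for a state q = (x, y, heading) and controls (speed, steering angle). A hypothetical reconstruction, since the real method may differ:

import math

def euler(t_i, q, u_s, u_p, L):
    # Kinematic bicycle model: u_s = speed, u_p = steering angle, L = wheelbase.
    return (u_s * math.cos(q[2]), u_s * math.sin(q[2]), u_s / L * math.tan(u_p))
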
brightcons_plg.py (project: imagepy, author: Image-Py)
def run(self, ips, snap, img, para = None):
        if ips.imgtype not in ('8-bit', 'rgb'):
            mid = (self.arange[0] + self.arange[1])/2 - para['bright']
            length = (self.arange[1] - self.arange[0])/np.tan(para['contrast']/180.0*np.pi)
            ips.range = (mid-length/2, mid+length/2)
            return
        if para == None: para = self.para
        mid = 128-para['bright']
        length = 255/np.tan(para['contrast']/180.0*np.pi)
        print(255/np.tan(para['contrast']/180.0*np.pi)/2)
        print(mid-length/2, mid+length/2)
        img[:] = snap
        if mid-length/2>0:
            np.subtract(img, mid-length/2, out=img, casting='unsafe')
            np.multiply(img, 255.0/length, out=img, casting='unsafe')
        else:
            np.multiply(img, 255.0/length, out=img, casting='unsafe')
            np.subtract(img, (mid-length/2)/length*255, out=img, casting='unsafe')
        img[snap<mid-length/2] = 0
        img[snap>mid+length/2] = 255
image_augmentation.py (project: dlcv_for_beginners, author: frombeijingwithlove)
def rotate_image(img, angle, crop):
    h, w = img.shape[:2]
    angle %= 360
    M_rotate = cv2.getRotationMatrix2D((w/2, h/2), angle, 1)
    img_rotated = cv2.warpAffine(img, M_rotate, (w, h))

    if crop:
        angle_crop = angle % 180
        if angle_crop > 90:
            angle_crop = 180 - angle_crop
        theta = angle_crop * np.pi / 180.0
        hw_ratio = float(h) / float(w)
        tan_theta = np.tan(theta)
        numerator = np.cos(theta) + np.sin(theta) * tan_theta
        r = hw_ratio if h > w else 1 / hw_ratio
        denominator = r * tan_theta + 1
        crop_mult = numerator / denominator
        w_crop = int(round(crop_mult*w))
        h_crop = int(round(crop_mult*h))
        x0 = int((w-w_crop)/2)
        y0 = int((h-h_crop)/2)

        img_rotated = crop_image(img_rotated, x0, y0, w_crop, h_crop)

    return img_rotated
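
crop_mult scales the crop window down so that an axis-aligned rectangle with the original aspect ratio still fits inside the rotated image. For a square image rotated by 45 degrees it evaluates to 1/sqrt(2), about 0.707, matching the inscribed-square intuition:

import numpy as np

theta = np.radians(45.0)
hw_ratio = 1.0                   # square image, so r = 1
tan_theta = np.tan(theta)
crop_mult = (np.cos(theta) + np.sin(theta) * tan_theta) / (hw_ratio * tan_theta + 1)
print(crop_mult)                 # ~0.7071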

