Python class _C: example source code

__init__.py source (project: pytorch, author: tylergenter)
@contextlib.contextmanager
def _free_mutex():
    # Hold the CUDA free-list mutex for the duration of the with-block.
    torch._C._cuda_lock_mutex()
    try:
        yield
    finally:
        torch._C._cuda_unlock_mutex()
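With the contextlib decorator in place, the helper can be used as a context manager. A minimal usage sketch (assumes CUDA is already initialized):

with _free_mutex():
    pass  # work that must not race with the caching allocator's frees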
function.py source (project: pytorch, author: tylergenter)
import functools

def once_differentiable(fn):
    from .variable import Variable

    @functools.wraps(fn)
    def wrapper(ctx, *args):
        tensor_args = [arg.data if isinstance(arg, Variable) else arg
                       for arg in args]
        outputs = fn(ctx, *tensor_args)
        # XXX: this is only an approximation of these flags - there's no way
        # to figure out if fn didn't use ctx.saved_variables and as a result
        # some Variables might require grad, even if no args do.
        # Unfortunately, this leads to unexpected error messages ("no nodes
        # require computing gradients"), but I don't have a better idea.
        # These functions would raise an error in backward anyway.
        volatile = any(arg.volatile if isinstance(arg, Variable) else False
                       for arg in args)
        requires_grad = any(arg.requires_grad if isinstance(arg, Variable) else False
                            for arg in args)
        if volatile:
            def err_fn(*args):
                return args
            kwargs = {'volatile': True}
        else:
            err_fn = torch._C._functions.DelayedError(
                b"trying to differentiate twice a function that was marked"
                b"with @once_differentiable")
            kwargs = {'requires_grad': requires_grad}
        if not isinstance(outputs, tuple):
            var = Variable(outputs, **kwargs) if outputs is not None else None
            return err_fn(var)
        return err_fn(*[Variable(o, **kwargs) if o is not None else None
                        for o in outputs])
    return wrapper
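A sketch of how a custom Function's backward is wrapped with this decorator; the Exp class here is illustrative, not from the page:

import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable

class Exp(Function):
    @staticmethod
    def forward(ctx, x):
        y = x.exp()
        ctx.save_for_backward(y)
        return y

    @staticmethod
    @once_differentiable  # backward sees raw tensors; double backward raises
    def backward(ctx, grad_output):
        y, = ctx.saved_tensors
        return grad_output * y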
__init__.py source (project: pytorch-coriander, author: hughperkins)
def is_available():
    """Returns a bool indicating if CUDA is currently available."""
    if (not hasattr(torch._C, '_cuda_isDriverSufficient') or
            not torch._C._cuda_isDriverSufficient()):
        return False
    return torch._C._cuda_getDeviceCount() > 0
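A typical guard built on this check, falling back to the CPU when no usable driver or device is present (sketch):

import torch

x = torch.randn(4)
if torch.cuda.is_available():
    x = x.cuda()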
__init__.py source (project: pytorch-coriander, author: hughperkins)
def _sleep(cycles):
    torch._C._cuda_sleep(cycles)
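_sleep enqueues a spin kernel for roughly the given number of device cycles; PyTorch's own tests use it to keep a stream busy. A sketch:

torch.cuda._sleep(1000000)  # enqueue ~1e6 spin cycles on the current stream
torch.cuda.synchronize()    # block the host until the spin kernel finishes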
__init__.py source (project: pytorch-coriander, author: hughperkins)
def __enter__(self):
    if self.idx == -1:
        return
    _lazy_init()
    self.prev_idx = torch._C._cuda_getDevice()
    if self.prev_idx != self.idx:
        torch._C._cuda_setDevice(self.idx)
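Together with the matching __exit__ shown further below, this method implements the torch.cuda.device context manager. Typical usage (assumes at least one GPU):

with torch.cuda.device(0):
    t = torch.cuda.FloatTensor(10)  # allocated on device 0
# the previously current device is restored here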
__init__.py source (project: pytorch-coriander, author: hughperkins)
def set_device(device):
    """Sets the current device.

    Usage of this function is discouraged in favor of :any:`device`. In most
    cases it's better to use the ``CUDA_VISIBLE_DEVICES`` environment variable.

    Arguments:
        device (int): selected device. This function is a no-op if this
            argument is negative.
    """
    if device >= 0:
        torch._C._cuda_setDevice(device)
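The direct call, for cases where the context manager does not fit; this sketch assumes a second GPU at index 1:

torch.cuda.set_device(1)
t = torch.cuda.FloatTensor(5)  # now allocated on device 1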
__init__.py source (project: pytorch-coriander, author: hughperkins)
def device_count():
    """Returns the number of GPUs available."""
    if is_available():
        _lazy_init()
        return torch._C._cuda_getDeviceCount()
    else:
        return 0
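A common pattern is to enumerate the visible devices with this count, here combined with get_device_name from further below (sketch):

for i in range(torch.cuda.device_count()):
    print(i, torch.cuda.get_device_name(i))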
__init__.py source (project: pytorch-coriander, author: hughperkins)
def current_device():
    """Returns the index of a currently selected device."""
    _lazy_init()
    return torch._C._cuda_getDevice()
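A quick sanity check; the default device index is 0 before any set_device call:

assert torch.cuda.current_device() == 0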
__init__.py source (project: pytorch-coriander, author: hughperkins)
def synchronize():
    """Waits for all kernels in all streams on current device to complete."""
    _lazy_init()
    return torch._C._cuda_synchronize()
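Because CUDA kernels launch asynchronously, a host-side timer only measures GPU work if the device is synchronized first. A timing sketch:

import time

start = time.time()
# ... launch CUDA work here ...
torch.cuda.synchronize()
elapsed = time.time() - start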
__init__.py source (project: pytorch-coriander, author: hughperkins)
def current_blas_handle():
    """Returns cublasHandle_t pointer to current cuBLAS handle"""
    return torch._C._cuda_getCurrentBlasHandle()
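A sketch of inspecting the handle; this assumes the binding returns the raw pointer as a Python integer, suitable for handing to native code that expects a cublasHandle_t:

handle = torch.cuda.current_blas_handle()
print(hex(handle))  # raw pointer value, assuming an int is returned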
__init__.py source (project: pytorch-coriander, author: hughperkins)
def _host_allocator():
    _lazy_init()
    return torch._C._cuda_cudaHostAllocator()
__init__.py source (project: pytorch-coriander, author: hughperkins)
Contains the same _free_mutex helper as the tylergenter pytorch snippet shown above.
function.py source (project: pytorch-coriander, author: hughperkins)
Contains the same once_differentiable decorator as the tylergenter pytorch snippet shown above.
functional.py source (project: pytorch, author: ezyang)
def batch_norm(input, running_mean, running_var, weight=None, bias=None,
               training=False, momentum=0.1, eps=1e-5):
    f = torch._C._functions.BatchNorm(running_mean, running_var, training,
                                      momentum, eps, torch.backends.cudnn.enabled)
    return f(input, weight, bias)
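A hypothetical call of this functional form, for the Variable-era API this snippet targets; the running statistics are plain tensors that the op updates in training mode:

import torch
from torch.autograd import Variable

x = Variable(torch.randn(8, 3))
running_mean = torch.zeros(3)
running_var = torch.ones(3)
out = batch_norm(x, running_mean, running_var, training=True)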


__init__.py source (project: pytorch, author: ezyang)
Contains the same is_available helper as the pytorch-coriander snippet shown above.
__init__.py source (project: pytorch, author: ezyang)
Contains the same _sleep helper as the pytorch-coriander snippet shown above.
__init__.py source (project: pytorch, author: ezyang)
def _lazy_init():
    global _initialized, _cudart, _original_pid, _queued_calls
    if _initialized:
        return
    if _in_bad_fork:
        from sys import version_info
        if version_info < (3, 4):
            msg = ("To use CUDA with multiprocessing, you must use Python "
                   "3.4+ and the 'spawn' start method")
        else:
            msg = ("To use CUDA with multiprocessing, you must use the "
                   "'spawn' start method")
        raise RuntimeError(
            "Cannot re-initialize CUDA in forked subprocess. " + msg)
    _check_driver()
    torch._C._cuda_init()
    torch._C._cuda_sparse_init()
    _cudart = _load_cudart()
    _cudart.cudaGetErrorName.restype = ctypes.c_char_p
    _cudart.cudaGetErrorString.restype = ctypes.c_char_p
    _original_pid = os.getpid()
    _initialized = True
    for queued_call, orig_traceback in _queued_calls:
        try:
            queued_call()
        except Exception as e:
            msg = ("CUDA call failed lazily at initialization with error: {}\n\n"
                   "CUDA call was originally invoked at:\n\n{}").format(str(e), orig_traceback)
            raise_from(DeferredCudaCallError(msg), e)
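The fork error raised above is avoided by using the 'spawn' start method whenever CUDA is combined with multiprocessing. A sketch (the worker function and its body are illustrative):

import torch
import torch.multiprocessing as mp

def worker(rank):
    torch.cuda.set_device(rank)  # safe: CUDA initializes inside the spawned child

if __name__ == '__main__':
    mp.set_start_method('spawn')
    p = mp.Process(target=worker, args=(0,))
    p.start()
    p.join()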
__init__.py source (project: pytorch, author: ezyang)
def __exit__(self, *args):
    # Restore the device that was current when the block was entered;
    # returning False lets any exception propagate unchanged.
    if self.prev_idx != self.idx:
        torch._C._cuda_setDevice(self.prev_idx)
    return False
__init__.py source (project: pytorch, author: ezyang)
Contains the same set_device function as the pytorch-coriander snippet shown above.
__init__.py source (project: pytorch, author: ezyang)
def get_device_name(device):
    """Gets the name of a device.

    Arguments:
        device (int): device for which to return the name. This function is a
            no-op if this argument is negative.
    """
    if device >= 0:
        return torch._C._cuda_getDeviceName(device)

