Source code examples of Python's build_ext() class

setup.py (project: pytorch-dist, author: apaszke)
def run(self):
        # cwrap depends on pyyaml, so we can't import it earlier
        from tools.cwrap import cwrap
        from tools.cwrap.plugins.THPPlugin import THPPlugin
        from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
        from tools.cwrap.plugins.AutoGPU import AutoGPU
        from tools.cwrap.plugins.BoolOption import BoolOption
        from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
        from tools.cwrap.plugins.NullableArguments import NullableArguments
        from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
        cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
            BoolOption(), THPPlugin(), AutoGPU(condition='IS_CUDA'),
            ArgcountSortPlugin(), KwargsPlugin(),
        ])
        cwrap('torch/csrc/cudnn/cuDNN.cwrap', plugins=[
            CuDNNPlugin(), NullableArguments()
        ])
        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)
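
For reference, overrides like the run() above live on a build_ext subclass that is wired into setup() through cmdclass. A minimal sketch, with illustrative names rather than the actual pytorch-dist configuration:

import setuptools
import setuptools.command.build_ext


class build_ext(setuptools.command.build_ext.build_ext):
    def run(self):
        # generate wrapper sources here (e.g. via cwrap), then hand off to
        # the stock implementation to compile the extension modules
        setuptools.command.build_ext.build_ext.run(self)


setuptools.setup(
    name='example',
    ext_modules=[setuptools.Extension('example._C', sources=['example/_C.c'])],
    cmdclass={'build_ext': build_ext},
)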
setup.py (project: pytorch-dist, author: apaszke)
def run(self):
        self.run_command('build_py')
        self.run_command('build_ext')
setup.py (project: pytorch, author: tylergenter)
def run(self):
        self.run_command('build_py')
        self.run_command('build_ext')
setup.py (project: pytorch, author: tylergenter)
def run(self):
        # Print build options
        if WITH_NUMPY:
            print('-- Building with NumPy bindings')
        else:
            print('-- NumPy not found')
        if WITH_CUDNN:
            print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
        else:
            print('-- Not using cuDNN')
        if WITH_CUDA:
            print('-- Detected CUDA at ' + CUDA_HOME)
        else:
            print('-- Not using CUDA')
        if WITH_NCCL and SYSTEM_NCCL:
            print('-- Using system provided NCCL library')
        elif WITH_NCCL:
            print('-- Building NCCL library')
        else:
            print('-- Not using NCCL')

        # cwrap depends on pyyaml, so we can't import it earlier
        from tools.cwrap import cwrap
        from tools.cwrap.plugins.THPPlugin import THPPlugin
        from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
        from tools.cwrap.plugins.AutoGPU import AutoGPU
        from tools.cwrap.plugins.BoolOption import BoolOption
        from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
        from tools.cwrap.plugins.NullableArguments import NullableArguments
        from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
        from tools.cwrap.plugins.WrapDim import WrapDim
        thp_plugin = THPPlugin()
        cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
            BoolOption(), thp_plugin, AutoGPU(condition='IS_CUDA'),
            ArgcountSortPlugin(), KwargsPlugin(), WrapDim()
        ])
        cwrap('torch/csrc/cudnn/cuDNN.cwrap', plugins=[
            CuDNNPlugin(), NullableArguments()
        ])
        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)
setup.py (project: pykaldi, author: pykaldi)
def install(self):
        self.build_dir = 'build/lib'
        setuptools.command.install_lib.install_lib.install(self)

################################################################################
# Setup pykaldi
################################################################################

# We add a 'dummy' extension so that setuptools runs the build_ext step.
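
A minimal sketch of that trick (package and extension names are illustrative, not taken from the pykaldi setup.py): setuptools only schedules the build_ext command when at least one Extension is declared, so an empty placeholder is enough to make a custom build_ext.run() fire.

import setuptools

# Placeholder with no sources: it exists only so that setuptools runs the
# build_ext step, whose overridden run() drives the real (external) build.
DUMMY_EXTENSION = setuptools.Extension('example._dummy', sources=[])

setuptools.setup(
    name='example',
    ext_modules=[DUMMY_EXTENSION],
    # cmdclass would normally also map 'build_ext' to the custom subclass
    # whose run() performs the actual compilation.
)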
setup.py (project: pytorch-coriander, author: hughperkins)
def run(self):
        self.run_command('build_py')
        self.run_command('build_ext')
setup.py (project: pytorch-coriander, author: hughperkins)
def run(self):
        # Print build options
        if WITH_NUMPY:
            print('-- Building with NumPy bindings')
        else:
            print('-- NumPy not found')
        if WITH_CUDNN:
            print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
        else:
            print('-- Not using cuDNN')
        if WITH_CUDA:
            print('-- Detected CUDA at ' + CUDA_HOME)
        else:
            print('-- Not using CUDA')
        if WITH_NCCL and SYSTEM_NCCL:
            print('-- Using system provided NCCL library')
        elif WITH_NCCL:
            print('-- Building NCCL library')
        else:
            print('-- Not using NCCL')

        # cwrap depends on pyyaml, so we can't import it earlier
        from tools.cwrap import cwrap
        from tools.cwrap.plugins.THPPlugin import THPPlugin
        from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
        from tools.cwrap.plugins.AutoGPU import AutoGPU
        from tools.cwrap.plugins.BoolOption import BoolOption
        from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
        from tools.cwrap.plugins.NullableArguments import NullableArguments
        from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
        from tools.cwrap.plugins.WrapDim import WrapDim
        thp_plugin = THPPlugin()
        cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
            BoolOption(), thp_plugin, AutoGPU(condition='IS_CUDA'),
            ArgcountSortPlugin(), KwargsPlugin(), WrapDim()
        ])
        cwrap('torch/csrc/cudnn/cuDNN.cwrap', plugins=[
            CuDNNPlugin(), NullableArguments()
        ])
        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)
setup_helpers.py (project: bezier, author: dhermes)
def __init__(self, *args, **kwargs):
        setuptools.command.build_ext.build_ext.__init__(self, *args, **kwargs)
        self.journal_file = os.environ.get(JOURNAL_ENV)
        self.commands = []
setup_helpers.py (project: bezier, author: dhermes)
def run(self):
        self.set_f90_compiler()
        self.start_journaling()

        self.compile_fortran_obj_files()

        result = setuptools.command.build_ext.build_ext.run(self)
        self.save_journal()
        self.cleanup()

        return result
setup.py (project: pytorch, author: ezyang)
def run(self):
        self.run_command('build_py')
        self.run_command('build_ext')
build_module.py (project: cppimport, author: tbenthompson)
def __init__(self, libdest, *args, **kwargs):
        self.libdest = libdest
        setuptools.Extension.__init__(self, *args, **kwargs)

# Subclass setuptools build_ext to put the compiled shared library in the
# appropriate place in the source tree.
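
A rough sketch of such a subclass, reusing the libdest attribute stored by the Extension subclass above (an assumed shape, not the actual cppimport implementation):

import os
import shutil
import setuptools.command.build_ext


class BuildExtInPlace(setuptools.command.build_ext.build_ext):
    def run(self):
        # let setuptools compile the extension into its build directory first
        setuptools.command.build_ext.build_ext.run(self)
        for ext in self.extensions:
            # get_ext_fullpath() reports where the compiled shared library was
            # placed; copy it into the directory recorded on the Extension.
            built = self.get_ext_fullpath(ext.name)
            target = os.path.join(ext.libdest, os.path.basename(built))
            shutil.copyfile(built, target)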
setup.py (project: pytorch, author: pytorch)
def run(self):
        self.run_command('build_py')
        self.run_command('build_ext')
setup.py (project: pytorch, author: ezyang)
def run(self):
        # Print build options
        if WITH_NUMPY:
            print('-- Building with NumPy bindings')
        else:
            print('-- NumPy not found')
        if WITH_CUDNN:
            print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
        else:
            print('-- Not using cuDNN')
        if WITH_CUDA:
            print('-- Detected CUDA at ' + CUDA_HOME)
        else:
            print('-- Not using CUDA')
        if WITH_NCCL and WITH_SYSTEM_NCCL:
            print('-- Using system provided NCCL library at ' +
                  NCCL_LIB_DIR + ', ' + NCCL_INCLUDE_DIR)
        elif WITH_NCCL:
            print('-- Building NCCL library')
        else:
            print('-- Not using NCCL')
        if WITH_DISTRIBUTED:
            print('-- Building with distributed package ')
            monkey_patch_THD_link_flags()
        else:
            print('-- Building without distributed package')

        # cwrap depends on pyyaml, so we can't import it earlier
        from tools.cwrap import cwrap
        from tools.cwrap.plugins.THPPlugin import THPPlugin
        from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
        from tools.cwrap.plugins.AutoGPU import AutoGPU
        from tools.cwrap.plugins.BoolOption import BoolOption
        from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
        from tools.cwrap.plugins.NullableArguments import NullableArguments
        from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
        from tools.cwrap.plugins.WrapDim import WrapDim
        from tools.cwrap.plugins.AssertNDim import AssertNDim
        from tools.cwrap.plugins.Broadcast import Broadcast
        from tools.cwrap.plugins.ProcessorSpecificPlugin import ProcessorSpecificPlugin
        from tools.autograd.gen_variable_type import gen_variable_type
        thp_plugin = THPPlugin()
        cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
            ProcessorSpecificPlugin(), BoolOption(), thp_plugin,
            AutoGPU(condition='IS_CUDA'), ArgcountSortPlugin(), KwargsPlugin(),
            AssertNDim(), WrapDim(), Broadcast()
        ])
        cwrap('torch/csrc/cudnn/cuDNN.cwrap', plugins=[
            CuDNNPlugin(), NullableArguments()
        ])
        # Build ATen based Variable classes
        autograd_gen_dir = 'torch/csrc/autograd/generated'
        if not os.path.exists(autograd_gen_dir):
            os.mkdir(autograd_gen_dir)
        gen_variable_type(
            'torch/lib/build/ATen/ATen/Declarations.yaml',
            autograd_gen_dir)

        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)
setup.py (project: pytorch, author: pytorch)
def run(self):

        # Print build options
        if WITH_NUMPY:
            print('-- Building with NumPy bindings')
        else:
            print('-- NumPy not found')
        if WITH_CUDNN:
            print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
        else:
            print('-- Not using cuDNN')
        if WITH_CUDA:
            print('-- Detected CUDA at ' + CUDA_HOME)
        else:
            print('-- Not using CUDA')
        if WITH_NCCL and WITH_SYSTEM_NCCL:
            print('-- Using system provided NCCL library at ' +
                  NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
        elif WITH_NCCL:
            print('-- Building NCCL library')
        else:
            print('-- Not using NCCL')
        if WITH_DISTRIBUTED:
            print('-- Building with distributed package ')
            monkey_patch_THD_link_flags()
        else:
            print('-- Building without distributed package')

        generate_code(ninja_global)

        if IS_WINDOWS:
            build_temp = self.build_temp
            build_dir = 'torch/csrc'

            ext_filename = self.get_ext_filename('_C')
            lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'

            _C_LIB = os.path.join(build_temp, build_dir, lib_filename).replace('\\', '/')

            THNN.extra_link_args += [_C_LIB]
            if WITH_CUDA:
                THCUNN.extra_link_args += [_C_LIB]
            else:
                # To generate .obj files for AutoGPU for the exported class,
                # the header cannot be compiled directly, so it is copied
                # elsewhere as a source file.
                if os.path.exists("torch/csrc/generated/AutoGPU_cpu_win.cpp"):
                    os.remove("torch/csrc/generated/AutoGPU_cpu_win.cpp")
                shutil.copyfile("torch/csrc/cuda/AutoGPU.h", "torch/csrc/generated/AutoGPU_cpu_win.cpp")
        if WITH_NINJA:
            # before we start the normal build make sure all generated code
            # gets built
            ninja_global.run()

        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)

