python类urlretrieve()的实例源码

5_word2vec.py 文件源码 项目:udacity-deep-learning 作者: runhani 项目源码 文件源码 阅读 16 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Download `filename` from the module-level `url` when missing, then
  check that the on-disk size equals `expected_bytes`.

  Raises an Exception when the size differs (truncated/wrong download).
  """
  if not os.path.exists(filename):
    # urlretrieve returns (local_path, headers); keep only the path.
    filename, _ = urlretrieve(url + filename, filename)
  actual_size = os.stat(filename).st_size
  if actual_size == expected_bytes:
    print('Found and verified %s' % filename)
    return filename
  print(actual_size)
  raise Exception(
    'Failed to verify ' + filename + '. Can you get to it with a browser?')
1_notmnist.py 文件源码 项目:udacity-deep-learning 作者: runhani 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes, force=False):
  """Download a file if not present, and make sure it's the right size.

  Parameters:
    filename: local file name; also appended to the module-level `url`.
    expected_bytes: exact size the file must have on disk.
    force: when True, re-download even if a local copy exists.

  Returns the filename; raises Exception when the size check fails.
  """
  if force or not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  statinfo = os.stat(filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified', filename)
  else:
    # Bug fix: the message previously read 'Failed to verifyFILENAME'
    # because of a missing space after 'verify'.
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  return filename
__init__.py 文件源码 项目:odin 作者: imito 项目源码 文件源码 阅读 15 收藏 0 点赞 0 评论 0
def __delitem__(self, key):
    # Keep the inverse mapping in sync: drop the mirrored entry first,
    # then remove the key from this (forward) dict.
    mirrored = super(bidict, self).__getitem__(key)
    del self._inv[mirrored]
    return dict.__delitem__(self, key)


# Under Python 2, 'urlretrieve' relies on FancyURLopener from legacy
# urllib module, known to have issues with proxy management
__init__.py 文件源码 项目:odin 作者: imito 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def get_file(fname, origin, outdir):
  '''Download `origin` into `outdir/fname` unless it already exists.

  Parameters
  ----------
  fname: output file name
  origin: url, link
  outdir: path to output dir

  Returns
  -------
  Path to the downloaded (or pre-existing) file.  A partially
  downloaded file is removed on any error or KeyboardInterrupt.
  '''
  fpath = os.path.join(outdir, fname)
  # ====== remove empty folder ====== #
  if os.path.exists(fpath):
    if os.path.isdir(fpath) and len(os.listdir(fpath)) == 0:
      shutil.rmtree(fpath)
  # ====== download package ====== #
  if not os.path.exists(fpath):
    prog = Progbar(target=-1, name="Downloading: %s" % origin,
                   print_report=True, print_summary=True)

    def dl_progress(count, block_size, total_size):
      # First callback: learn the real total; afterwards report deltas.
      if prog.target < 0:
        prog.target = total_size
      else:
        prog.add(count * block_size - prog.seen_so_far)
    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      # Bug fix: HTTPError is a subclass of URLError, so it must be
      # caught first -- previously the URLError clause swallowed it and
      # the HTTPError branch was unreachable.
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt):
      # Never leave a truncated download behind.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
  return fpath


# ===========================================================================
# Python utilities
# ===========================================================================
test_eval_detection_voc.py 文件源码 项目:chainercv 作者: chainer 项目源码 文件源码 阅读 16 收藏 0 点赞 0 评论 0
def setUpClass(cls):
        # Fetch pre-computed fixture arrays once for the whole test
        # class.  urlretrieve returns (local_path, headers); element [0]
        # is the local path handed straight to np.load.
        base_url = 'https://github.com/yuyu2172/' \
            'share-weights/releases/download/0.0.3'

        def _fetch(name):
            return np.load(
                request.urlretrieve(os.path.join(base_url, name))[0])

        cls.dataset = _fetch(
            'voc_detection_dataset_2007_test_truncated_2017_06_06.npz')
        cls.result = _fetch(
            'voc_detection_result_2007_test_truncated_2017_06_06.npz')
drop_out_lstm.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Fetch `filename` (relative to the global `url`) unless a local copy
  exists; verify the download by its exact byte size."""
  if not os.path.exists(filename):
    # urlretrieve returns (local_path, headers); keep the path only.
    filename, _ = urlretrieve(url + filename, filename)
  size_on_disk = os.path.getsize(filename)
  if size_on_disk != expected_bytes:
    print(size_on_disk)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  print('Found and verified %s' % filename)
  return filename
base_lstm.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 16 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Ensure `filename` exists locally (downloading it from the global
  `url` if not) and holds exactly `expected_bytes` bytes."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  info = os.stat(filename)
  verified = info.st_size == expected_bytes
  if not verified:
    print(info.st_size)
    message = 'Failed to verify ' + filename + '. Can you get to it with a browser?'
    raise Exception(message)
  print('Found and verified %s' % filename)
  return filename
bigram_lstm.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 15 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Download `filename` from `url` when it is missing locally and
  confirm the file size matches `expected_bytes`."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  nbytes = os.stat(filename).st_size
  if nbytes == expected_bytes:
    print('Found and verified %s' % filename)
    return filename
  print(nbytes)
  raise Exception(
    'Failed to verify ' + filename + '. Can you get to it with a browser?')
cbow.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 32 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Download a missing `filename` from the module-level `url` and
  validate it by size; raises on any mismatch."""
  if not os.path.exists(filename):
    # Second tuple element (headers) is not needed here.
    filename, _ = urlretrieve(url + filename, filename)
  disk_size = os.path.getsize(filename)
  if disk_size != expected_bytes:
    print(disk_size)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  print('Found and verified %s' % filename)
  return filename
optimize_lstm.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 15 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Retrieve `filename` via the global `url` if absent, then assert the
  exact expected size on disk."""
  already_present = os.path.exists(filename)
  if not already_present:
    filename, _ = urlretrieve(url + filename, filename)
  statinfo = os.stat(filename)
  ok = statinfo.st_size == expected_bytes
  if not ok:
    print(statinfo.st_size)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  print('Found and verified %s' % filename)
  return filename
skip_gram.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Download `filename` (prefixed by the module global `url`) when not
  already on disk; verify it is exactly `expected_bytes` long."""
  if not os.path.exists(filename):
    filename, _ = urlretrieve(url + filename, filename)
  observed = os.stat(filename).st_size
  if observed == expected_bytes:
    print('Found and verified %s' % filename)
    return filename
  print(observed)
  raise Exception(
    'Failed to verify ' + filename + '. Can you get to it with a browser?')
embed_bigram_lstm_one_char_label.py 文件源码 项目:tensor_flow 作者: eecrazy 项目源码 文件源码 阅读 14 收藏 0 点赞 0 评论 0
def maybe_download(filename, expected_bytes):
  """Fetch `filename` from `url` when a local copy is missing, then
  check its byte size against `expected_bytes`."""
  if not os.path.exists(filename):
    # Discard the response headers returned alongside the path.
    filename, _ = urlretrieve(url + filename, filename)
  byte_count = os.path.getsize(filename)
  if byte_count != expected_bytes:
    print(byte_count)
    raise Exception(
      'Failed to verify ' + filename + '. Can you get to it with a browser?')
  print('Found and verified %s' % filename)
  return filename

# filename = maybe_download('/users1/zyli/data/text8.zip', 31344016)
data.py 文件源码 项目:BinaryNetConvolution 作者: rarilurelo 项目源码 文件源码 阅读 16 收藏 0 点赞 0 评论 0
def download_mnist_data():
    # Fetch the four MNIST archives from `parent` into the working
    # directory, printing the same per-file progress messages as before.
    for archive in (train_images, train_labels, test_images, test_labels):
        print('Downloading {:s}...'.format(archive))
        request.urlretrieve('{:s}/{:s}'.format(parent, archive), archive)
        print('Done')

    print('Converting training data...')
    data_train, target_train = load_mnist(train_images, train_labels,
                                          num_train)
    print('Done')
    print('Converting test data...')
    data_test, target_test = load_mnist(test_images, test_labels, num_test)
    # Concatenate train and test into one flat dataset dict.
    mnist = {'data': np.append(data_train, data_test, axis=0),
             'target': np.append(target_train, target_test, axis=0)}

    print('Done')
    print('Save output...')
    with open('mnist.pkl', 'wb') as output:
        six.moves.cPickle.dump(mnist, output, -1)
    print('Done')
    print('Convert completed')
data.py 文件源码 项目:catgan 作者: smayru 项目源码 文件源码 阅读 16 收藏 0 点赞 0 评论 0
def download_mnist_data():
    # Download each MNIST archive from `parent`; messages and order are
    # identical to the original unrolled version.
    archives = [train_images, train_labels, test_images, test_labels]
    for name in archives:
        print('Downloading {:s}...'.format(name))
        request.urlretrieve('{:s}/{:s}'.format(parent, name), name)
        print('Done')

    print('Converting training data...')
    data_train, target_train = load_mnist(train_images, train_labels,
                                          num_train)
    print('Done')
    print('Converting test data...')
    data_test, target_test = load_mnist(test_images, test_labels, num_test)
    # Stack train then test so indices < num_train are training rows.
    mnist = {'data': np.append(data_train, data_test, axis=0),
             'target': np.append(target_train, target_test, axis=0)}

    print('Done')
    print('Save output...')
    with open('mnist.pkl', 'wb') as output:
        six.moves.cPickle.dump(mnist, output, -1)
    print('Done')
    print('Convert completed')
data_utils.py 文件源码 项目:keras 作者: NVIDIA 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    # Arguments
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        # Content-Length may be absent (e.g. chunked transfer); fall
        # back to -1 so the hook still receives a well-defined total.
        total_size = response.info().get('Content-Length')
        total_size = int(total_size.strip()) if total_size else -1
        count = 0
        while True:
            chunk = response.read(chunk_size)
            count += 1
            if not chunk:
                # Bug fix: this final call was made unconditionally and
                # raised TypeError whenever reporthook was None.
                if reporthook:
                    reporthook(count, total_size, total_size)
                break
            if reporthook:
                reporthook(count, chunk_size, total_size)
            yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
        for chunk in chunk_read(response, reporthook=reporthook):
            fd.write(chunk)
data_utils.py 文件源码 项目:keras_superpixel_pooling 作者: parag2489 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    # Arguments
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        # Content-Length may be absent (e.g. chunked transfer); fall
        # back to -1 so the hook still receives a well-defined total.
        total_size = response.info().get('Content-Length')
        total_size = int(total_size.strip()) if total_size else -1
        count = 0
        while True:
            chunk = response.read(chunk_size)
            count += 1
            if not chunk:
                # Bug fix: this final call was made unconditionally and
                # raised TypeError whenever reporthook was None.
                if reporthook:
                    reporthook(count, total_size, total_size)
                break
            if reporthook:
                reporthook(count, chunk_size, total_size)
            yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
        for chunk in chunk_read(response, reporthook=reporthook):
            fd.write(chunk)
mnist_tools.py 文件源码 项目:unrolled-gan 作者: musyoku 项目源码 文件源码 阅读 15 收藏 0 点赞 0 评论 0
def download_mnist_data():
    # Pull the four MNIST archive files from `parent`; a single final
    # "Done" is printed after all downloads complete (as before).
    filenames = (train_images_filename, train_labels_filename,
                 test_images_filename, test_labels_filename)
    for name in filenames:
        print("Downloading {} ...".format(name))
        request.urlretrieve("{}/{}".format(parent, name), name)
    print("Done")
utils.py 文件源码 项目:databrewer 作者: rmax 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def download_file(url, filename, quiet=True, reporthook_kwargs=None):
    """Downloads a file with optional progress report.

    Parameters:
        url: fully qualified http(s)/ftp URL.
        filename: local destination path; also used as the progress-bar
            description when given.
        quiet: when True, download without any progress reporting.
        reporthook_kwargs: extra keyword arguments for the progress
            reporter (`_ReportHook`).

    Raises:
        ValueError: if `url` lacks a schema or uses an unsupported one.
    """
    if '://' not in url:
        raise ValueError("fully qualified URL required: %s" % url)
    if url.partition('://')[0] not in ('https', 'http', 'ftp'):
        raise ValueError("unsupported URL schema: %s" % url)

    # FTP goes through urllib; http(s) through requests.  Previously
    # this branch was duplicated again just before the non-quiet call;
    # the redundant re-selection has been removed.
    if url.startswith('ftp://'):
        retrieve = _urlretrieve
    else:
        retrieve = _urlretrieve_requests

    if quiet:
        return retrieve(url, filename)

    reporthook_kwargs = reporthook_kwargs or {}
    if filename:
        reporthook_kwargs.setdefault('desc', filename)

    reporthook_kwargs.setdefault('unit', 'b')
    reporthook_kwargs.setdefault('unit_scale', True)

    reporthook = _ReportHook(**reporthook_kwargs)
    with contextlib.closing(reporthook):
        retrieve(url, filename, reporthook)
data_utils.py 文件源码 项目:dspp-keras 作者: PeptoneInc 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    # Arguments
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
        # Content-Length may be absent (e.g. chunked transfer); fall
        # back to -1 so the hook still receives a well-defined total.
        total_size = response.info().get('Content-Length')
        total_size = int(total_size.strip()) if total_size else -1
        count = 0
        while True:
            chunk = response.read(chunk_size)
            count += 1
            if not chunk:
                # Bug fix: this final call was made unconditionally and
                # raised TypeError whenever reporthook was None.
                if reporthook:
                    reporthook(count, total_size, total_size)
                break
            if reporthook:
                reporthook(count, chunk_size, total_size)
            yield chunk

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
        for chunk in chunk_read(response, reporthook=reporthook):
            fd.write(chunk)
load_data.py 文件源码 项目:rascal-tensorflow 作者: stayrascal 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def maybe_download(filename, force=False):
    """Download a file into `root` if not present (or when `force`)."""
    filename = os.path.join(root, filename)
    needs_fetch = force or not os.path.exists(filename)
    if needs_fetch:
        print('Attempting to download:', filename)
        # NOTE(review): `url + filename` concatenates the remote URL
        # with the already-joined local path -- presumably `root`
        # mirrors the remote layout; verify against the caller.
        filename, _ = urlretrieve(url + filename, filename)
        print('Download Complete!')
    return filename


问题


面经


文章

微信
公众号

扫码关注公众号