Source code examples for Python's URLopener() class
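The snippets below are collected from open-source projects and show how the legacy URLopener class is used in practice. URLopener lives in the Python 2 urllib module (and in urllib.request in Python 3, where it has been deprecated since 3.3 in favor of urlopen() and urlretrieve()). The core pattern shared by all of the examples, as a minimal sketch (the URL and filename are placeholders):

    import urllib

    opener = urllib.URLopener()
    # retrieve(url, filename) downloads the resource to a local file
    opener.retrieve("http://example.com/file.txt", "file.txt")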

getphotos.py (project: FBI-Scraper, author: GKalliatakis)
def get_biased_photos(tag, min_taken_date, max_taken_date):
    #Change Folder Path
    if not os.path.isdir(tag):
        os.mkdir(tag)
    os.chdir(tag)

    #Run image download
    for page in range(1,8):
        photos = flickr.photos_search(tags=tag, page=page, per_page=500, tag_mode='all',
                                      sort="interestingness-desc",
                                      min_taken_date=min_taken_date,
                                      max_taken_date=max_taken_date)
        for photo in photos:
            try:
                url = photo.getURL(size='Original', urlType='source')
                urllist.append(url)
                image = urllib.URLopener()
                image.retrieve(url, os.path.basename(urlparse.urlparse(url).path))
                print 'Downloading...', url
            except flickr.FlickrError:
                print 'Link no longer available (!)'
########################################################################
scorer.py (project: corpus-to-graph-ml, author: CatalystCode)
def load_model_from_url(url):
    # TODO: move this into a class..
    global scoring_model
    url_opener = urllib.URLopener()
    temp_model_path = get_temp_model_path()
    url_opener.retrieve(url, temp_model_path)

    # try to load the model:
    try:
        temp_model = ScoringModel.from_file(temp_model_path)
    except Exception as e:
        print "Failed to load donwloaded model: %s"%e
        os.remove(temp_model_path)
        raise RuntimeError("Failed to load donwloaded model! error: %s"%e)

    # update model:
    scoring_model = temp_model

    # delete existing model
    if path.isfile(model_file_path):
        os.remove(model_file_path)
    os.rename(temp_model_path, model_file_path)


# TODO: move this to an object with an init function...
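The load_model_from_url function above follows a download, validate, then swap sequence. A sketch of the same idea made safer with a temporary file created next to the destination (names here are hypothetical, not this project's API):

    import os
    import tempfile
    import urllib

    def fetch_and_swap(url, final_path, load_model):
        # download next to the final path so the rename below stays on
        # one filesystem and is atomic on POSIX
        fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(final_path))
        os.close(fd)
        urllib.URLopener().retrieve(url, tmp_path)
        try:
            model = load_model(tmp_path)  # validate before replacing
        except Exception:
            os.remove(tmp_path)
            raise
        os.rename(tmp_path, final_path)
        return model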
dnscrypt.py (project: useless-scripts, author: veerendra2)
def download_csv():
    servers_dict = dict()
    global SERVERS_LIST_FILE
    print Colors.OKBLUE+"Downloading Latest 'dnscrypt-resolvers.csv'.."+Colors.ENDC
    try:
        csv_file = urllib.URLopener()
        csv_file.retrieve("https://raw.githubusercontent.com/jedisct1/dnscrypt-proxy/master/dnscrypt-resolvers.csv", "/opt/dnscrypt-resolvers.csv")
    except:
        print Colors.WARNING+"Unable to download 'dnscrypt-resolvers.csv'. Using default /usr/share/dnscrypt-proxy/dnscrypt-resolvers.csv"+Colors.ENDC
        if os.path.exists("/usr/share/dnscrypt-proxy/dnscrypt-resolvers.csv"):
            SERVERS_LIST_FILE="/usr/share/dnscrypt-proxy/dnscrypt-resolvers.csv"
        else:
            print Colors.FAIL+"Default csv file not found. Exiting.."+Colors.ENDC
            exit(2)
    with open(SERVERS_LIST_FILE) as f:
        data = list(csv.reader(f, delimiter=",", quotechar='"', skipinitialspace=True))[1:]
        print "Index".ljust(5, " "), "Name".ljust(25, " "), "Location".ljust(25, " "), "DNSSEC".ljust(8,
                                                                                                      " "), "No Log".ljust(
            7, " "), "Resolver Address".ljust(30)
        print "".ljust(100, "-")
        for rows, index in zip(data, enumerate(data)):
            servers_dict.setdefault(index[0], rows[0])
            print str(index[0]).ljust(5, " "), rows[0].ljust(25, " "), rows[3].ljust(25, " "), rows[7].ljust(8, " "), \
            rows[9].ljust(7, " "), rows[10].ljust(30, " ")
    return servers_dict
utils.py (project: dmr_utils, author: n0mjs710)
def try_download(_path, _file, _url, _stale):
    now = time()
    url = URLopener()
    file_exists = isfile(_path+_file)
    if file_exists:
        file_old = (getmtime(_path+_file) + _stale) < now
    if not file_exists or (file_exists and file_old):
        try:
            url.retrieve(_url, _path+_file)
            result = 'ID ALIAS MAPPER: \'{}\' successfully downloaded'.format(_file)
        except IOError:
            result = 'ID ALIAS MAPPER: \'{}\' could not be downloaded'.format(_file)
    else:
        result = 'ID ALIAS MAPPER: \'{}\' is current, not downloaded'.format(_file)
    url.close()
    return result

# LEGACY VERSION - MAKES A SIMPLE {INTEGER ID: 'CALLSIGN'} DICTIONARY
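For comparison, a rough Python 3 port of the staleness check above, using urllib.request.urlretrieve (a sketch, not part of dmr_utils):

    from os.path import isfile, getmtime
    from time import time
    from urllib.request import urlretrieve

    def try_download_py3(path, name, url, stale_secs):
        dest = path + name
        # download only if the file is missing or older than stale_secs
        if not isfile(dest) or getmtime(dest) + stale_secs < time():
            try:
                urlretrieve(url, dest)
                return "'{}' successfully downloaded".format(name)
            except IOError:
                return "'{}' could not be downloaded".format(name)
        return "'{}' is current, not downloaded".format(name)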
Getdata.py (project: gps2tec, author: weihan1107)
def get_p1p2data(self):
        import urllib, os
        if self.file_exist():
            print "No need to download P1P2 DCB data..."
            return
        print "Start to download P1P2 DCB data..."
        weblink = "ftp://ftp.unibe.ch/aiub/CODE/{0}/".format(self.year)
        if not os.path.isfile(self.sourcefn):
            try:
                download = urllib.URLopener()
                download.retrieve(weblink+self.sourcefn, self.sourcefn)
            except IOError:
                weblink = "ftp://ftp.unibe.ch/aiub/CODE/{0}/".format(self.year_bom)
                download = urllib.URLopener()
                download.retrieve(weblink+self.sourcefn_bom, self.sourcefn)


        os.system("gzip -fd {0}".format(self.sourcefn))
Getdata.py (project: gps2tec, author: weihan1107)
def get_p1c1data(self):
        import urllib, os
        if self.file_exist():
            print "No need to download P1C1 DCB data..."
            return
        print "Start to download P1C1 DCB data..."
        weblink = "ftp://ftp.unibe.ch/aiub/CODE/{0}/".format(self.year)
        if not os.path.isfile(self.sourcefn):
            try:
                download = urllib.URLopener()
                download.retrieve(weblink+self.sourcefn, self.sourcefn)
            except IOError:
                weblink = "ftp://ftp.unibe.ch/aiub/CODE/{0}/".format(self.year_bom)
                download = urllib.URLopener()
                download.retrieve(weblink+self.sourcefn_bom, self.sourcefn)

        os.system("gzip -fd {0}".format(self.sourcefn))
crawler.py (project: tf-tutorial, author: zchen0211)
def download(start, end):
  parse_dict = np.load('parse_dict')
  image = urllib.URLopener()
  for k in parse_dict.keys()[start:end]:
    # makedir of k
    log.info('crawling images of class %s' % k)
    data_path = os.path.join('/media/DATA/ImageNet/Extra/', k)
    if not os.path.exists(data_path):
      os.mkdir(data_path)
      cnt = 0
      for link in parse_dict[k][:500]:
        fn = os.path.join(data_path, '%s_%d.jpg' % (k, cnt))
        cnt += 1
        if cnt % 20 == 0: log.info('%d images' % cnt)
        # retrieve inside the loop so every link is downloaded, not just the last
        try:
          image.retrieve(link, fn)
        except IOError:
          cnt -= 1
    # print len(parse_dict[k])
recipe-224043.py (project: code, author: ActiveState)
def __init__(self, *args):
    self.version = "iegrab/0.1"
    self.open = self.iegrab
    urllib.URLopener.__init__(self, *args)
commands.py (project: PyHero, author: Splinter0)
def getLast(passwd):
    df = "http://10.5.5.9/" #DEFAULT PARTS
    p1 = "?t="
    p2 = "&p=%"

    par1, par2, opt = photo_mode() #MOVING TO PHOTO MODE
    urllib2.urlopen(df + par1 + "/" + par2 + p1 + passwd + p2 + opt)
    time.sleep(1)

    print("\n\r[" + extra.colors.yellow + ".." + extra.colors.end + "] Taking a pic")
    par1, par2, opt = shut()  #TAKE A PIC
    urllib2.urlopen(df + par1 + "/" + par2 + p1 + passwd + p2 + opt)
    time.sleep(2)

    url = "http://10.5.5.9:8080/gp/gpMediaList" #FIND THE PICTURE USING SOME REGEX
    content = urllib2.urlopen(url).read()
    content = str(content)
    content2 = content.split("},")
    last = content2[-1]
    last = re.findall('[A-Z+][0-9+]*', last)
    last = ''.join(last)
    last = re.sub(r'(JP)', r'.JP', last)

    time.sleep(1)
    print("\n\r[" + extra.colors.yellow + ".." + extra.colors.end + "] Downloading the pic")
    dow = "http://10.5.5.9:8080/DCIM/103GOPRO/" + last #DOWNLOAD THE PIC AND SAVE IT TO output/
    getFoto = urllib.URLopener()
    getFoto.retrieve("http://10.5.5.9:8080/DCIM/103GOPRO/" + last, "outputs/" + last)
    print("\r\n[" + extra.colors.green + "+" + extra.colors.end + "] Picture saved in outputs/"+last+"\r\n")
    try:
        time.sleep(2)
        process = subprocess.Popen("eog -f outputs/" + last, shell=True, stdout=subprocess.PIPE)
    except:
        pass

#TODO: ADD INFO() FUNCTION TO GET ALL INFORMATION ABOUT THE GOPRO AND ADD DELALL() TO DELETE ALL FILES ON GOPRO
images_downloader.py (project: images-web-crawler, author: amineHorseman)
def download(self, links, target_folder='./data'):
        """Download images from a lisk of links"""

        # check links and folder:
        if len(links) < 1:
            print("Error: Empty list, no links provided")
            exit()
        self.images_links = links
        DatasetBuilder.check_folder_existance(target_folder)
        if target_folder[-1] == '/':
            target_folder = target_folder[:-1]

        # start downloading:
        print("Downloading files...")
        progress = 0
        images_nbr = sum([len(self.images_links[key]) for key in self.images_links])
        for keyword, links in self.images_links.items():
            DatasetBuilder.check_folder_existance(target_folder + '/' + keyword, display_msg=False)
            for link in links:
                target_file = target_folder + '/' + keyword + '/' + link.split('/')[-1]
                try:
                    f = urllib.URLopener()
                    f.retrieve(link, target_file)
                except IOError:
                    self.failed_links.append(link)
                progress = progress + 1
                print("\r >> Download progress: ", (progress * 100 / images_nbr), "%...", end="")
                sys.stdout.flush()

        print("\r >> Download progress: ", (progress * 100 / images_nbr), "%")
        print(" >> ", (progress - len(self.failed_links)), " images downloaded")

        # save failed links:
        if len(self.failed_links):
            f2 = open(target_folder + "/failed_list.txt", 'w')
            for link in self.failed_links:
                f2.write(link + "\n")
            print(" >> Failed to download ", len(self.failed_links),
                  " images: access not granted ",
                  "(links saved to: '", target_folder, "/failed_list.txt')")
IngestPrices.py (project: crypto-forcast, author: 7yl4r)
def run(self):
        # with self.output().open('w') as f:
        src = "http://api.bitcoincharts.com/v1/csv/coinbaseUSD.csv.gz"
        testfile = urllib.URLopener()
        testfile.retrieve(src, self.output())
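Note: assuming this run() belongs to a Luigi task (suggested by the self.output() call), retrieve() expects a filesystem path, so self.output().path is probably what was intended as the second argument.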
get_datasets.py (project: metaqnn, author: bowenbaker)
def get_cifar100(save_dir=None, root_path=None):
    ''' If root_path is None, we download the data set from the internet.

        Exactly one of save_dir and root_path must be provided.

        Returns Xtr, Ytr, Xte, Yte as numpy arrays
    '''

    assert((save_dir is not None and root_path is None) or (save_dir is None and root_path is not None))

    if root_path is None:
        print 'Downloading CIFAR100 dataset...'
        tar_path = os.path.join(save_dir, "cifar-100-python.tar.gz")
        url = urllib.URLopener()
        url.retrieve("https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz", tar_path)
        print 'Download Done, Extracting...'
        tar = tarfile.open(tar_path)
        tar.extractall(save_dir)
        tar.close()

    root = os.path.join(save_dir, "cifar-100-python") if not root_path else root_path

    Xtr, Ytr = load_cifar100_data(os.path.join(root, 'train'))
    Xte, Yte = load_cifar100_data(os.path.join(root, 'test'))


    print 'Xtrain shape', Xtr.shape
    print 'Ytrain shape', Ytr.shape
    print 'Xtest shape', Xte.shape
    print 'Ytest shape', Yte.shape

    return Xtr, Ytr, Xte, Yte
DataDownloader.py (project: WPEAR, author: stephenlienharrell)
def download(self, url, filedir, defaultdownloaddir='/sourceFileDownloads'):
        downloadDirectory = defaultdownloaddir
        if not os.path.exists(downloadDirectory):
            os.makedirs(downloadDirectory)

        fullURL = urlparse.urljoin(url, filedir)
        opener = urllib.URLopener()  # avoid shadowing the builtin 'file'

        fileDownloadPath = downloadDirectory + '/' + fullURL.split('/')[-1]
        opener.retrieve(fullURL, fileDownloadPath)
        return fileDownloadPath
mnist_loader.py (project: Vulcan, author: rfratila)
def _download_file(file_path, folder='data'):
    print("Downloading {}...".format(file_path))

    test_file = urllib.URLopener()
    file_name = file_path.split('/')[-1]
    test_file.retrieve(file_path, '{}/{}'.format(folder, file_name))
webserver.py (project: nerodia, author: watir)
def stop(self):
        self.stop_serving = True
        try:
            # This is to force stop the server loop
            urllib_request.URLopener().open('http://{}:{}'.format(self.host, self.port))
        except IOError:
            pass
        logging.info('Shutting down the webserver')
        self.thread.join()
dnscrypt-auto.py (project: useless-scripts, author: veerendra2)
def download_csv():
    global SERVERS_LIST_FILE
    print Colors.OKBLUE+"Downloading Latest 'dnscrypt-resolvers.csv'.."+Colors.ENDC
    try:
        csv_file = urllib.URLopener()
        csv_file.retrieve("https://raw.githubusercontent.com/jedisct1/dnscrypt-proxy/master/dnscrypt-resolvers.csv", "/opt/dnscrypt-resolvers.csv")
    except:
        print Colors.WARNING+"Unable to download 'dnscrypt-resolvers.csv'. Using default /usr/share/dnscrypt-proxy/dnscrypt-resolvers.csv"+Colors.ENDC
        if os.path.exists("/usr/share/dnscrypt-proxy/dnscrypt-resolvers.csv"):
            SERVERS_LIST_FILE="/usr/share/dnscrypt-proxy/dnscrypt-resolvers.csv"
        else:
            print Colors.FAIL+"Default csv file not found. Exiting.."+Colors.ENDC
            exit(2)
backup.py (project: CIDDS, author: markusring)
def getCurrentServerConfig():
    newConfigFile = urllib.URLopener()
    newConfigFile.retrieve("YOUR_SERVER_IP/scripts/automation/packages/system/serverconfig.ini", "/home/debian/serverconfig.ini")

# Configure backup server
readIni.py (project: CIDDS, author: markusring)
def getCurrentServerConfig():
    newConfigFile = urllib.URLopener()
    newConfigFile.retrieve("YOUR_SERVER_IP/automation/packages/system/serverconfig.ini", "packages/system/serverconfig.ini")

# Configure different server services
urlopen.py (project: bandit-ss, author: zeroSteiner)
def test_urlopen():
    # urllib
    url = urllib.quote('file:///bin/ls')
    urllib.urlopen(url, 'blah', 32)
    urllib.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')

    # urllib2
    handler = urllib2.HTTPBasicAuthHandler()
    handler.add_password(realm='test',
                         uri='http://mysite.com',
                         user='bob')
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
    urllib2.urlopen('file:///bin/ls')
    urllib2.Request('file:///bin/ls')

    # Python 3
    urllib.request.urlopen('file:///bin/ls')
    urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')

    # Six
    six.moves.urllib.request.urlopen('file:///bin/ls')
    six.moves.urllib.request.urlretrieve('file:///bin/ls', '/bin/ls2')
    opener = six.moves.urllib.request.URLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
    opener = six.moves.urllib.request.FancyURLopener()
    opener.open('file:///bin/ls')
    opener.retrieve('file:///bin/ls')
utils.py (project: tichu-tournament, author: aragos)
def datareader(url,opener=urllib.URLopener().open):
        return opener(url).read()
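Note that the default opener here is evaluated once, when the function is defined, so all calls that rely on the default share a single URLopener instance; passing opener= explicitly substitutes a different one per call.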
main.py (project: tensorflow-art, author: nitroventures)
def assure_model_file(model_file):
  model_file_path = os.path.join(CHECKPOINTS_DIR, model_file)

  if not os.path.isfile(model_file_path):
    url_opener = urllib.URLopener()
    print "downloading " + model_file
    url_opener.retrieve(MODEL_URL + "/" + model_file, model_file_path)
base.py (project: gui-o-matic, author: mailpile)
def _do(self, op, args):
        op, args = op.lower(), copy.copy(args)

        if op == 'show_url':
            self.show_url(url=args[0])

        elif op in ('get_url', 'post_url'):
            url = args.pop(0)
            base_url = '/'.join(url.split('/')[:3])

            uo = urllib.URLopener()
            cookies = self.config.get('http_cookies', {}).get(base_url, [])
            for cookie, value in cookies:
                uo.addheader('Cookie', '%s=%s' % (cookie, value))

            if op == 'post_url':
                (fn, hdrs) = uo.retrieve(url, data=args)
            else:
                (fn, hdrs) = uo.retrieve(url)
            hdrs = unicode(hdrs)

            with open(fn, 'rb') as fd:
                data = fd.read().strip()

            if data.startswith('{') and 'application/json' in hdrs:
                data = json.loads(data)
                if 'message' in data:
                    self.notify_user(data['message'])

        elif op == "shell":
            try:
                for arg in args:
                    rv = os.system(arg)
                    if 0 != rv:
                        raise OSError(
                            'Failed with exit code %d: %s' % (rv, arg))
            except:
                traceback.print_exc()

        elif hasattr(self, op):
            getattr(self, op)(**(args or {}))
checker.py (project: Insanity-Framework, author: Exploit-install)
def download_python():
    """ Download python for some reason..? """
    banner("Downloading Python 2.7.x.msi, please wait...")
    urllib.URLopener().retrieve("https://www.python.org/ftp/python/2.7.12/python-2.7.12.msi")
    os.system('sudo wine msiexec /i python-2.7.12.msi /L*v log.txt')
    os.system('clear')
checker.py (project: Insanity-Framework, author: Exploit-install)
def download_python_win_exten():
    """ Download Windows extenstion for python without checking the checksum.. """
    banner("Downloading pywin32-220.win32-py2.7.exe (Windows extension), please wait...")
    urllib.URLopener().retrieve("https://ufpr.dl.sourceforge.net/project/pywin32/pywin32/Build%20220/pywin32-220.win32-py2.7.exe")
    os.system('sudo wine pywin32-220.win32-py2.7.exe')
    os.system(
        'sudo wine /root/.wine/drive_c/Python27/python.exe /root/.wine/drive_c/Python27/Scripts/pip.exe install pyinstaller')
    os.system(
        'sudo wine /root/.wine/drive_c/Python27/python.exe /root/.wine/drive_c/Python27/Scripts/pip.exe uninstall Crypto')
    os.system('sudo wine /root/.wine/drive_c/Python27/python.exe /root/.wine/drive_c/Python27/Scripts/pip.exe install pycrypto')
    os.system('clear')
checker.py (project: Insanity-Framework, author: Exploit-install)
def download_vc_for_py():
    """ Download the VC extenstion for python, this is a little less scary because it's from MS """
    banner("Downloading VCForPython27.msi, please wait...")
    urllib.URLopener().retrieve("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi")
    os.system('wget https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi')
    os.system('sudo wine msiexec /i VCForPython27.msi /L*v log2.txt')
    os.system('mkdir .OK')
    os.system('sudo rm -Rf log2.txt')
    os.system('sudo rm -Rf log.txt')
test1.py (project: python_learn, author: jetty-guo)
def save_file(url, file_name):
    opener = urllib.URLopener()
    fp = opener.open(url)
    data = fp.read()
    fp.close()
    out = open(path + file_name, 'wb')
    out.write(data)
    out.close()
metadata.py (project: deb-python-pysaml2, author: openstack)
def fetch_metadata(url, path, maxage=600):
    """
    :param url:  metadata remote location
    :param path: metadata file name
    :param maxage: if max age of existing metadata file (s) is exceeded,
     the file will be fetched from the remote location
    """
    fetch = False
    if not os.path.isfile(path):
        fetch = True
        logger.debug("metadata file %s not found", path)
    elif (os.path.getmtime(path) + maxage) < time.time():
        fetch = True
        logger.debug("metadata file %s from %s is more than %s s old",
                     path,
                     strftime("%Y-%m-%d %H:%M:%S", time.localtime(os.path.getmtime(path))),
                     maxage)
    else:
        logger.debug("metadata file %s is less than %s s old", path, maxage)
    if fetch:
        f = urllib.URLopener()
        try:
            f.retrieve(url, path)
            logger.debug("downloaded metadata from %s into %s", url, path)
        except:
            logger.debug("downloaded metadata from %s failed: %s",
                         url, sys.exc_info()[0])
aws_connector.py (project: snap-to-cloud-examples, author: synapse-wireless)
def get_cafile(self):
        """Download a certificate to authenticate the identity of the AWS IoT platform."""
        authority_location = "https://www.symantec.com/content/en/us/enterprise/verisign/roots/VeriSign-Class%203-Public-Primary-Certification-Authority-G5.pem"

        url = urllib.URLopener()
        cafile = self.get_abs_path(CAFILE)
        url.retrieve(authority_location, cafile)
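Since URLopener is deprecated, new code targeting Python 3 would typically use urlopen() with an explicit copy, or urlretrieve(). A minimal sketch of the same kind of download (the function name is illustrative):

    import shutil
    from urllib.request import urlopen

    def download(url, dest_path):
        # stream the response body straight into the destination file
        with urlopen(url) as response, open(dest_path, "wb") as out:
            shutil.copyfileobj(response, out)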

