python类Browser()的实例源码

WEBgmxCh4cker_threading.py 文件源码 项目:WebGmxChecker 作者: SUP3RIA 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def login(i,j,ur):
    """Attempt a form login at http://www.<ur>/ with credentials (i, j).

    Returns True when the post-login URL is exactly 72 characters long
    (this checker's site-specific success heuristic), else False.
    """
    agent = UserAgent()
    jar = cookielib.LWPCookieJar()
    browser = mechanize.Browser()
    browser.set_cookiejar(jar)
    #browser.set_debug_http(True)
    browser.set_handle_robots(False)
    browser.set_handle_refresh(False)
    # random user-agent per attempt to look less bot-like
    browser.addheaders = [('User-Agent', agent.random), ('Accept', '*/*')]
    target = "http://www." + ur + "/"
    browser.open(target)
    # the login form is the second form on the page
    browser.select_form(nr=1)
    browser.form['username'] = i
    browser.form['password'] = j
    browser.submit()
    # success heuristic: redirected URL has a fixed length of 72
    return len(browser.geturl()) == 72
cookiejar_browser.py 文件源码 项目:SPSE 作者: ioef 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def urlOpen(url, username, password, submitted):
    """Open *url* with mechanize, logging in on the first call.

    When *submitted* is True the first form on the page is filled with
    *username*/*password* and submitted, storing session cookies in the
    global cookiejar; on later calls (submitted False) the stored jar
    is reused so no re-login is needed.
    """
    global cookiejar
    #Check if the function is called for the 1st time
    #if it is not reuse the cookie previously created
    if submitted == True:
        br = mechanize.Browser()
        br.set_handle_robots(False)
        br.set_cookiejar(cookiejar)
        br.open(url)

        #select the 1st form
        br.select_form(nr=0)
        #fill in the credentials
        # bugfix: was `form['username']` / `form['password']` — `form`
        # was never defined, raising NameError; the form lives on `br`
        br.form['username'] = username
        br.form['password'] = password
        br.submit()
    else:
        br2 = mechanize.Browser()
        # bugfix: was `br.set_handle_robots(False)` — `br` is undefined
        # in this branch; the browser here is `br2`
        br2.set_handle_robots(False)
        br2.set_cookiejar(cookiejar)
        br2.open(url)
browser.py 文件源码 项目:llk 作者: Tycx2ry 项目源码 文件源码 阅读 31 收藏 0 点赞 0 评论 0
def get_browser(self):
        '''Returns a mechanize.Browser object configured with the framework's global options.'''
        browser = mechanize.Browser()
        # spoof the user-agent header from the global options
        browser.addheaders = [('User-agent', self._global_options['user-agent'])]
        # verbose mode: dump HTTP traffic, redirects and response bodies
        if self._global_options['verbosity'] >= 2:
            for enable in (browser.set_debug_http,
                           browser.set_debug_redirects,
                           browser.set_debug_responses):
                enable(True)
        # route both schemes through the configured proxy, if any
        proxy = self._global_options['proxy']
        if proxy:
            browser.set_proxies({'http': proxy, 'https': proxy})
        # ignore robots.txt
        browser.set_handle_robots(False)
        # apply the global socket timeout
        socket.setdefaulttimeout(self._global_options['timeout'])
        return browser
WEBgmxCh4cker.py 文件源码 项目:WebGmxChecker 作者: SUP3RIA 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def login(i,j,ur):
    """Try to log in at http://www.<ur>/ with username *i* and password *j*.

    A post-login URL whose length is exactly 72 characters is treated
    as a successful login (site-specific heuristic).
    """
    br = mechanize.Browser()
    # fresh cookie jar per attempt
    br.set_cookiejar(cookielib.LWPCookieJar())
    #br.set_debug_http(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(False)
    br.addheaders = [('User-Agent', UserAgent().random), ('Accept', '*/*')]
    br.open("http://www." + ur + "/")
    # credentials go into the second form on the page
    br.select_form(nr=1)
    br.form['username'] = i
    br.form['password'] = j
    br.submit()
    # a 72-character URL means the login succeeded
    if len(br.geturl()) == 72:
        return True
    return False
edit.py 文件源码 项目:WebScraping 作者: liinnux 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def mechanize_edit():
    """Use mechanize to increment population
    """
    # login: submit the first form on the login page with the
    # credentials stored in the `login` module
    br = mechanize.Browser()
    br.open(login.LOGIN_URL)
    br.select_form(nr=0)
    print br.form
    br['email'] = login.LOGIN_EMAIL
    br['password'] = login.LOGIN_PASSWORD
    response = br.submit()

    # edit country: read the current population, add one, resubmit
    br.open(COUNTRY_URL)
    br.select_form(nr=0)
    print 'Population before:', br['population']
    br['population'] = str(int(br['population']) + 1)
    br.submit()

    # check population increased by re-fetching the same form
    br.open(COUNTRY_URL)
    br.select_form(nr=0)
    print 'Population after:', br['population']
MechanizeCrawler.py 文件源码 项目:Crawler 作者: xinhaojing 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def __init__(self, headers = {}, debug = True, p = ''):
        """Configure a mechanize browser with cookies, gzip, redirects etc.

        headers -- extra header name/value pairs appended to addheaders
        debug   -- when True, dump HTTP traffic/redirects/responses
        p       -- optional http proxy; '', 'None', 'NULL' or None disable it
        """
        self.timeout = 10
        self.br = mechanize.Browser()  # the underlying browser
        self.cj = cookielib.LWPCookieJar()
        self.br.set_cookiejar(self.cj)      # enable cookie handling
        self.br.set_handle_equiv(True)      # process http-equiv headers
        self.br.set_handle_gzip(True)       # accept gzipped responses
        self.br.set_handle_redirect(True)   # follow redirects
        self.br.set_handle_referer(True)    # send the Referer header
        self.br.set_handle_robots(False)    # ignore robots.txt
        self.br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
        self.br.addheaders = [('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36')]
        self.debug = debug
        # debug mode: dump HTTP traffic, redirects and responses
        if self.debug:
            self.br.set_debug_http(True)
            self.br.set_debug_redirects(True)
            self.br.set_debug_responses(True)
        # append caller-supplied headers
        # (bugfix: the loop variable was `keys` but the body referenced
        # the undefined name `key`, raising NameError for any headers)
        for key, value in headers.items():
            self.br.addheaders += [(key, value), ]
        # proxy: skip empty / 'None' / 'NULL' / None sentinels
        # (also fixes the original's `len(p)` crash when p is None)
        if p and p not in ('None', 'NULL'):
            self.br.set_proxies({'http': p})
browser.py 文件源码 项目:recon-ng 作者: Hehe-Zhc 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def get_browser(self):
        '''Returns a mechanize.Browser object configured with the framework's global options.'''
        opts = self._global_options
        br = mechanize.Browser()
        # spoof the user-agent header
        br.addheaders = [('User-agent', opts['user-agent'])]
        # debug flag enables full HTTP tracing
        if opts['debug']:
            br.set_debug_http(True)
            br.set_debug_redirects(True)
            br.set_debug_responses(True)
        # send http and https traffic through the configured proxy
        if opts['proxy']:
            br.set_proxies({'http': opts['proxy'], 'https': opts['proxy']})
        # skip robots.txt
        br.set_handle_robots(False)
        # global socket timeout applies to every request
        socket.setdefaulttimeout(opts['timeout'])
        return br
babac.py 文件源码 项目:sopel-modules 作者: normcyr 项目源码 文件源码 阅读 21 收藏 0 点赞 0 评论 0
def login(username, password):
    """Log in to cyclebabac.com's WordPress form and return the browser.

    The returned mechanize.Browser carries the session cookies, so it
    can be reused for further authenticated requests.
    """
    browser = mechanize.Browser()

    # keep session cookies in an LWP cookie jar
    browser.set_cookiejar(cookielib.LWPCookieJar())

    # Follows refresh 0 but not hangs on refresh > 0
    browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages? Uncomment this
    #browser.set_debug_http(True)
    #browser.set_debug_redirects(True)
    #browser.set_debug_responses(True)

    # submit the WordPress login form (fields 'log' and 'pwd')
    browser.open("http://cyclebabac.com/wp-login.php")
    browser.select_form(nr=0)
    browser.form['log'] = str(username)
    browser.form['pwd'] = str(password)
    browser.submit()

    return browser
browser.py 文件源码 项目:recon-ng 作者: captainhooligan 项目源码 文件源码 阅读 30 收藏 0 点赞 0 评论 0
def get_browser(self):
        '''Returns a mechanize.Browser object configured with the framework's global options.'''
        options = self._global_options
        browser = mechanize.Browser()
        # identify as the configured user-agent
        browser.addheaders = [('User-agent', options['user-agent'])]
        # verbosity >= 2 turns on full HTTP debugging
        if options['verbosity'] >= 2:
            browser.set_debug_http(True)
            browser.set_debug_redirects(True)
            browser.set_debug_responses(True)
        # same proxy for both schemes when one is configured
        if options['proxy']:
            browser.set_proxies(dict(http=options['proxy'], https=options['proxy']))
        # don't honor robots.txt
        browser.set_handle_robots(False)
        # timeout is set process-wide on the socket module
        socket.setdefaulttimeout(options['timeout'])
        return browser
test_browser.py 文件源码 项目:mechanize 作者: python-mechanize 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def test_encoding(self):
        # Verify Browser.encoding() prefers the charset from the first
        # (real HTTP) Content-Type header over any later one, and falls
        # back to mechanize's default when no charset is present.
        import mechanize
        from StringIO import StringIO
        import urllib
        # always take first encoding, since that's the one from the real HTTP
        # headers, rather than from HTTP-EQUIV
        b = mechanize.Browser()
        for s, ct in [
            ("", mechanize._html.DEFAULT_ENCODING),
            ("Foo: Bar\r\n\r\n", mechanize._html.DEFAULT_ENCODING),
            ("Content-Type: text/html; charset=UTF-8\r\n\r\n", "UTF-8"),
            ("Content-Type: text/html; charset=UTF-8\r\n"
             "Content-Type: text/html; charset=KOI8-R\r\n\r\n", "UTF-8"),
        ]:
            # build a fake response object carrying the raw header block `s`
            msg = mimetools.Message(StringIO(s))
            r = urllib.addinfourl(StringIO(""), msg, "http://www.example.com/")
            b.set_response(r)
            self.assertEqual(b.encoding(), ct)
test_browser.py 文件源码 项目:mechanize 作者: python-mechanize 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def test_clone_browser(self):
        # copy.copy on a Browser must produce a working clone: handlers
        # are new objects re-parented to the clone, while the cookiejar
        # is deliberately shared between original and copy.
        from mechanize import Browser
        br = Browser()
        br.set_handle_refresh(True, max_time=237, honor_time=True)
        br.set_handle_robots(False)
        cbr = copy.copy(br)
        # every handler is a distinct instance of the same class, owned by the clone
        for h, ch in zip(br.handlers, cbr.handlers):
            self.assertIsNot(h, ch)
            self.assertIs(ch.parent, cbr)
            self.assertIs(h.__class__, ch.__class__)
        self.assertEqual(set(br._ua_handlers), set(cbr._ua_handlers))
        # the cookiejar object itself is shared, not copied
        self.assertIs(br._ua_handlers['_cookies'].cookiejar,
                      cbr._ua_handlers['_cookies'].cookiejar)
        # addheaders is a distinct list with equal contents
        self.assertIsNot(br.addheaders, cbr.addheaders)
        self.assertEqual(br.addheaders, cbr.addheaders)
        # refresh-handler settings survive the clone
        h = cbr._ua_handlers['_refresh']
        self.assertEqual((h.honor_time, h.max_time), (True, 237))
test_performance.py 文件源码 项目:mechanize 作者: python-mechanize 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def performance_plot():
    # Measure mechanize retrieval time for local files of doubling size
    # (256 KB .. 256 MB) and plot file size (MB) against elapsed time.
    def retrieve(url, filename):
        br = mechanize.Browser()
        br.retrieve(url, filename)

#     import urllib2
#     def retrieve(url, filename):
#         urllib2.urlopen(url).read()

#     from mechanize import _useragent
#     ua = _useragent.UserAgent()
#     ua.set_seekable_responses(True)
#     ua.set_handle_equiv(False)
#     def retrieve(url, filename):
#         ua.retrieve(url, filename)

    rows = []
    for size in power_2_range(256 * KB, 256 * MB):
        temp_maker = TempDirMaker()
        try:
            elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
        finally:
            # always clean up the temporary directory, even on failure
            temp_maker.tear_down()
        rows.append((size // float(MB), elapsed))
    show_plot(rows)
Win-discoverNetworks.py 文件源码 项目:WintersWrath 作者: Elfsong 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def wiglePrint(username, password, netid):
    browser = mechanize.Browser()
    browser.open('https://wigle.net/')
    reqData = urllib.urlencode({'credential_0': username, 'credential_1': password})
    browser.open('https://wigle.net/gps/gps/main/confirmquery', reqData)

    params = {}
    params['netid'] = netid
    reqParams = urllib.urlencode(params)

    resp = browser.open('https://api.wigle.net/api/v2/network/search?first=0&netid=' + netid).read()

    data = json.loads(resp)

    mapLat = 'N/A'
    mapLon = 'N/A'
    rLat = data['results'][0].get('trilat', None)
    if rLat:
        mapLat = rLat
    rLon = ata['results'][0].get('trilong', None)
    if rLon:
        mapLon = rLon
    print '[-] Lat: ' + mapLat + ', Lon: ' + mapLon
amplispy.py 文件源码 项目:AmpliSpy 作者: NullArray 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def mech_ops():
    """Download the public nameserver list from public-dns.info.

    Returns the response body split into a list with one address per
    line; exits the program when the download fails.
    """
    br = mechanize.Browser()
    br.set_handle_robots(False)
    # browser-like headers; select_UA() supplies the user-agent string
    br.addheaders = [('user-agent', select_UA()), ('accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]

    try:
        response = br.open("http://www.public-dns.info/nameservers.txt")
    except Exception as e:
        print "\n[" + t.red("!") + "]Critical, could not open public-dns.info"
        print "[" + t.green("+") + "]The following status code was recieved:"
        print "\n %s" % (e)
        sys.exit(1)

    result = response.read()
    # one nameserver address per line
    proc = result.rstrip().split('\n')

    return proc


# If args, read list, else fetch
duckduckgo.py 文件源码 项目:pydata_webscraping 作者: jmortega 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def create_browser():
    """Return a cookie-aware mechanize.Browser that mimics desktop Chrome."""
    browser = mechanize.Browser()
    jar = cookielib.LWPCookieJar()     # cookie jar so sessions persist
    browser.set_cookiejar(jar)
    browser.set_handle_equiv(True)     # honor http-equiv meta headers
    browser.set_handle_gzip(True)      # transparently handle gzip
    browser.set_handle_redirect(True)  # follow redirects automatically
    browser.set_handle_referer(True)
    browser.set_handle_robots(False)   # ignore robots.txt

    # Necessary headers to simulate an actual browser
    browser.addheaders = [
        ('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'),
        ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
        ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'),
        ('Accept-Encoding', 'gzip,deflate,sdch'),
        ('Accept-Language', 'en-US,en;q=0.8,fr;q=0.6'),
        ('Connection', 'keep-alive'),
    ]
    return browser
translateGoogle.py 文件源码 项目:pydata_webscraping 作者: jmortega 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def translate(home_language,target_language,text):
    """Scrape Google Translate to render *text* from *home_language*
    into *target_language*; returns the translated string."""
    # crude URL escaping: only spaces are encoded
    text = text.replace(" ","%20");

    get_url = "https://translate.google.com/?sl="+home_language+"&tl="+target_language+"&text="+text
    #print get_url
    browser = mechanize.Browser()
    # Disable loading robots.txt
    browser.set_handle_robots(False)
    browser.addheaders = [('User-agent',
                     'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98;)')]

    page = browser.open(get_url)
    html = page.read().decode('UTF-8')

    # the translation lives in span#result_box inside div#gt-res-content
    soup = BeautifulSoup(html, "lxml")
    div_content = soup.find('div', {'id' : 'gt-res-content'})
    converted_text = div_content.find('span', {'id':'result_box'}).text

    return converted_text
mod.py 文件源码 项目:brutat 作者: el2t 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def __init__(self, site, passp, proxyp, timeout, passlist, proxylist, proxylen, username, time, br = None):
    """Store brute-force configuration.

    br -- optional mechanize.Browser to reuse; a fresh one is created
          when omitted. (bugfix: the default used to be
          `br = mechanize.Browser()`, which Python evaluates once at
          definition time, so every instance shared a single Browser
          and its cookie/session state.)
    """
    self.site = site
    self.passp = passp
    self.proxyp = proxyp
    self.proxylist = proxylist
    self.proxylen = proxylen
    self.passlist = passlist
    self.username = username
    self.time = time
    # per-instance browser unless the caller supplied one
    self.br = br if br is not None else mechanize.Browser()
    self.timeout = timeout



    # configure mechanize Browser
LAN.py 文件源码 项目:various_stuff 作者: oujezdsky 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def getInfo(ipaddr, userAgent, proxz, hostname):
    """Query RIPE's database search for *ipaddr* and print the first two
    attributes of every result block.

    userAgent -- list of header tuples assigned to mechanize addheaders
    proxz     -- proxy mapping (currently unused; see commented line)
    hostname  -- printed alongside the IP for context
    """
    WEBFORM_NAME = 'search'
    browser = mechanize.Browser()
    browser.set_handle_robots(False)
    browser.set_handle_equiv(True)
    browser.set_handle_referer(True)
    browser.set_handle_redirect(True)
    browser.addheaders = userAgent
    # browser.set_proxies(proxz)
    cookie_jar = cookielib.LWPCookieJar()
    browser.set_cookiejar(cookie_jar)
    page = browser.open('https://apps.db.ripe.net/search/query.html')
    # locate the RIPE search form among all forms on the page
    for form in browser.forms():
        if form.name == WEBFORM_NAME:
            browser.select_form(WEBFORM_NAME)
            browser.form['search:queryString'] = ipaddr
            browser.form['search:sources'] = ['GRS']
            submission = browser.submit().read()
            parsed_submission = BeautifulSoup(submission, 'html.parser')
            print ipaddr, '/',hostname
            # each 'attrblock' <ul> is one record; show only its
            # first two <li> attributes
            for mainIndex in parsed_submission.find_all('ul', {'class': 'attrblock'}):
                for i, li in enumerate(mainIndex.findAll('li')):
                    if i in range(0, 2):
                        print '[+] ', li.text
            print '\n ########## \n'
bodegaMECH.py 文件源码 项目:OpenCouture-Dev 作者: 9-9-0 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def __init__(self):
        """Prepare a cookie-aware mechanize browser for the bdgastore shop."""
        self.br = mechanize.Browser()
        #self.cj = cookielib.LWPCookieJar()
        self.cj = cookielib.MozillaCookieJar()

        # wire cookies into the browser and relax its handlers
        self.br.set_cookiejar(self.cj)
        self.br.set_handle_equiv(True)
        self.br.set_handle_referer(True)
        self.br.set_handle_robots(False)
        self.br.addheaders = [('User-agent', 'Firefox')]

        # product page this bot targets
        self.item_url = 'http://shop.bdgastore.com/collections/footwear/products/y-3-pureboost-zg'

        # Create variables for user credentials and a function to import them
input_emojis.py 文件源码 项目:slack-shogi 作者: setokinto 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def input_emojis(id_, password, team_id, two_factor, force_update=False):
    """Log in to a Slack team and upload every emoji in the module-level
    `emojis` mapping (file name -> emoji name).

    id_          -- login e-mail
    password     -- login password
    team_id      -- the <team_id>.slack.com subdomain
    two_factor   -- 2FA code, or a falsy value when 2FA is disabled
    force_update -- upload even when an emoji of that name already exists
    """
    br = mechanize.Browser()
    br.set_handle_robots(False)
    br.open("https://{}.slack.com/".format(team_id))
    br.select_form(nr=0)
    br["email"] = id_
    br["password"] = password
    br.submit()
    if two_factor:
        br.select_form(nr=0)
        br["2fa_code"] = two_factor
        br.submit()

    count = 0
    for file_name in emojis:
        emoji_name = emojis[file_name]
        response = br.open(
            "https://{}.slack.com/customize/emoji".format(team_id))
        if response.read().find(emoji_name) >= 0 and not force_update:
            # Simple resume. Does it work?
            # FIXME: Use beautiful soup and search it using dom
            print("{}/{} skipped(already exists for the name '{}')".format(count,
                                                                           len(emojis), emoji_name))
            continue
        br.select_form(nr=0)
        br["name"] = emoji_prefix + emoji_name
        # bugfix: open the image in binary mode (text mode corrupts PNG
        # data / fails to decode) and use the correct MIME type
        # 'image/png' (was 'images/png'); the context manager also
        # closes the file handle, which the original leaked
        with open(file_name, "rb") as img:
            br.form.add_file(img, "image/png", file_name, name="img")
            br.submit()
        count += 1
        print("{}/{} completed".format(count, len(emojis)))
        time.sleep(1)
pesaply.py 文件源码 项目:sarafu 作者: pesaply 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def __init__(self, account, pin, browser=None):
        """
        In some occasions where you'll make a number of requests
        to the server, you will want to store the mechanize browser
        object in some cache so it can be reused.
        This has the advantage of reducing the number of requests
        necessary to complete given tasks.
        The browser object can simply be created this way:
        > browser = mechanize.Browser()
        """
        self.account = account
        self.pin = pin
        # reuse the supplied browser when there is one; otherwise build fresh
        if browser:
            self.br = browser
        else:
            self.br = mechanize.Browser()
        self.br.set_handle_robots(False)
scraper.py 文件源码 项目:pi_romulus 作者: ArthurMoore85 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def __init__(self, rom=None, parent=None):
        """
        Scrapes a website
        :param rom: user search query (ROM name)
        :param parent: optional parent object
        """
        self.parent = parent
        self.search_q = rom
        # emuparadise is the only site this scraper targets
        self.url = 'http://www.emuparadise.me'
        self.browser = mechanize.Browser()
rpautogen.py 文件源码 项目:nthu-ee-progreport-autogen 作者: HW-Lee 项目源码 文件源码 阅读 19 收藏 0 点赞 0 评论 0
def __init__(self, usrvar):
        """Cache credentials from the *usrvar* mapping and set up a browser.

        VPN credentials are optional; self.vpn stays None without them.
        """
        keys = usrvar.keys()
        if "vpn-username" in keys and "vpn-userpwd" in keys:
            self.vpn = {"usr": usrvar["vpn-username"], "pwd": usrvar["vpn-userpwd"]}
        else:
            self.vpn = None
        self.rooturl = None
        self.usrname = usrvar["username"]
        self.usrpwd = usrvar["userpwd"]
        self.stuname = usrvar["studentname"]
        # robots.txt is ignored for the scraping that follows
        self.br = mechanize.Browser()
        self.br.set_handle_robots(False)
clickFraud.py 文件源码 项目:PythonP2PBotnet 作者: jhoward321 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def attack(address, htmlObject):
    """Open *address* in a fresh browser and click *htmlObject*."""
    browser = Browser()
    browser.open(address)
    browser.click(htmlObject)
Opener.py 文件源码 项目:DistributeCrawler 作者: SmallHedgehog 项目源码 文件源码 阅读 35 收藏 0 点赞 0 评论 0
def __init__(self, cookie_filename=None, user_agent=None, timeout=None, **kwargs):
        """Build a mechanize-backed opener.

        cookie_filename -- optional LWP cookie file to preload
        user_agent      -- User-agent header value (defaults to an IE9 string)
        timeout         -- per-request timeout in seconds (default 5)

        Raises DependencyNotInstalledError when mechanize is missing.
        """
        try:
            import mechanize
        except ImportError:
            raise DependencyNotInstalledError('mechanize')

        if user_agent is None:
            user_agent = 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'

        self.browser = mechanize.Browser()
        self.cj = cookielib.LWPCookieJar()
        if cookie_filename is not None:
            self.cj.load(cookie_filename)
        self.browser.set_cookiejar(self.cj)
        self.browser.set_handle_equiv(True)
        self.browser.set_handle_gzip(True)
        self.browser.set_handle_redirect(True)
        self.browser.set_handle_referer(True)
        self.browser.set_handle_robots(False)
        # bugfix: the header name was misspelled 'User-agnet', so the
        # configured user_agent was never actually sent to servers
        self.browser.addheaders = [
            ('User-agent', user_agent)
        ]

        if timeout is None:
            # self._default_timout = mechanize._sockettimeout._GLOBAL_DEFAULT_TIMEOUT
            # NOTE(review): attribute keeps its historical misspelling
            # ('timout') because other code may read it by that name
            self._default_timout = 5
        else:
            self._default_timout = timeout
anon.py 文件源码 项目:Trity 作者: toxic-ig 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def anon():
    """Interactively send an anonymous e-mail via anonymouse.org.

    Prompts for recipient, subject and message on stdin, submits the
    site's e-mail form, then prints whether the site accepted the mail.
    """
    br = mechanize.Browser()

    to = raw_input(R + "Enter the recipient address: ")
    subject = raw_input("Enter the subject: ")
    print "Message: "
    message = raw_input(">")

    #proxy = "http://127.0.0.1:8080"

    url = "http://anonymouse.org/anonemail.html"
    # spoof an old IE/AOL user-agent string
    headers = "Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)"
    br.addheaders = [('User-agent', headers)]
    br.open(url)
    br.set_handle_equiv(True)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_debug_http(False)
    br.set_debug_redirects(False)
    #br.set_proxies({"http": proxy})

    # the e-mail form is the first form on the page
    br.select_form(nr=0)

    br.form['to'] = to
    br.form['subject'] = subject
    br.form['text'] = message

    result = br.submit()

    response = br.response().read()


    # anonymouse.org confirms success with this exact sentence
    if "The e-mail has been sent anonymously!" in response:
        print "The email has been sent successfully!! \n The recipient will get it in up to 12 hours!"
    else:
        print "Failed to send email!"
spotlight.py 文件源码 项目:studentLogin_API_Flask 作者: PiyushGoyal443 项目源码 文件源码 阅读 20 收藏 0 点赞 0 评论 0
def __init__(self):
        """Set up a permissive mechanize browser for the spotlight pages."""
        browser = mechanize.Browser()
        browser.set_handle_robots(False)   # ignore robots.txt
        browser.set_handle_equiv(True)     # process http-equiv headers
        browser.set_handle_gzip(True)      # accept gzipped responses
        browser.set_handle_redirect(True)  # follow redirects
        browser.set_handle_referer(True)   # send Referer headers
        self.br = browser

    #for getting academic spotlight
Project-ET.py 文件源码 项目:Project-ET 作者: p0rt22 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def anonymousEmail(to, subject, message):
        """Send an anonymous e-mail through anonymouse.org's web form.

        *to*, *subject* and *message* fill the form fields; the result
        is printed and control then returns to main() in either case.
        """
        br = mechanize.Browser()
        url = 'http://anonymouse.org/anonemail.html'
        # spoof a vintage IE/AOL user-agent
        headers = 'Mozilla/4.0 (compatible; MSIE 5.0; AOL 4.0; Windows 95; c_athome)'
        br.addheaders = [('User-agent', headers)]
        br.open(url)
        br.set_handle_equiv(True)
        br.set_handle_gzip(True)
        br.set_handle_redirect(True)
        br.set_handle_referer(True)
        br.set_handle_robots(False)
        br.set_debug_http(False)
        br.set_debug_redirects(False)

        # the e-mail form is the first form on the page
        br.select_form(nr=0)

        br.form['to'] = to
        br.form['subject'] = subject
        br.form['text'] = message

        result = br.submit()
        response = br.response().read()
        # fills all the forms on the website

        if 'The e-mail has been sent anonymously!' in response:
            print 'Success, the email will be sent shortly'
            pause()
            main()

        else:
            print 'Email failed to send'
            pause()
            main()
        # checks response from website
hoster.py 文件源码 项目:cloudomate 作者: Jaapp- 项目源码 文件源码 阅读 18 收藏 0 点赞 0 评论 0
def _create_browser():
        """Return a robots-ignoring Browser with a randomly chosen user-agent."""
        browser = Browser()
        browser.set_handle_robots(False)
        # pick a fresh user-agent for every new browser
        browser.addheaders = [('User-agent', random.choice(user_agents))]
        return browser
wallet.py 文件源码 项目:cloudomate 作者: Jaapp- 项目源码 文件源码 阅读 22 收藏 0 点赞 0 评论 0
def _get_network_cost(speed):
    """Fetch the recommended satoshi-per-byte fee for *speed* from the
    bitcoinfees.21.co API and return it as a float."""
    browser = Browser()
    browser.addheaders = [('User-Agent', 'Firefox')]
    response = browser.open('https://bitcoinfees.21.co/api/v1/fees/recommended')
    fee_table = json.loads(response.read())
    return float(fee_table[speed])


问题


面经


文章

微信
公众号

扫码关注公众号