Python install_opener() usage examples
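
urllib2.install_opener() replaces the module-level default opener, so every
later urllib2.urlopen() call is routed through whatever handlers were built
into it. A minimal sketch of the pattern (the proxy address is a placeholder):

import urllib2

# Build an opener with the handlers you need, then install it globally.
proxy_handler = urllib2.ProxyHandler({'http': 'http://127.0.0.1:8080'})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)

# From here on, plain urlopen() calls go through the installed opener.
response = urllib2.urlopen('http://example.com')
print response.read()

The snippets below, collected from open-source projects, apply the same
pattern with different handlers: redirects, cookies, proxies, keep-alive
connections, and authentication.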

grabFacebookData.py (project: facebook-group-scrape, author: mchirico)
def getJSONfromURL(url):
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
    headers = {'Authorization': BEAR, 'User-Agent': user_agent}
    # Install an opener that routes requests through the custom redirect handler.
    h = MyHTTPRedirectHandler()
    opener = urllib2.build_opener(h)
    urllib2.install_opener(opener)
    json_data = ""
    try:
        req = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(req)
        the_page = response.read()
        json_data = json.loads(the_page)
    except Exception:
        print("Error reading data members")
    return json_data
grabFacebookData.py (project: facebook-group-scrape, author: mchirico)
def getJson(limit=13, since='2016-05-25', until='2016-05-26'):
    url = 'https://graph.facebook.com/v2.5/' + FACEBOOK_GROUP + '/feed?fields=reactions.limit(500){link,name,pic_square,type},message,name,id,created_time,permalink_url,shares,comments.limit(500){created_time,likes.limit(500),message,from,comments.limit(507){likes,message,from,created_time}},from&limit=%s&since=%s&until=%s&access_token=%s' % (limit, since, until, TOKEN)
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
    headers = {'Authorization': BEAR, 'User-Agent': user_agent}
    # Install an opener that routes requests through the custom redirect handler.
    h = MyHTTPRedirectHandler()
    opener = urllib2.build_opener(h)
    urllib2.install_opener(opener)
    json_data = ""
    try:
        req = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(req)
        the_page = response.read()
        json_data = json.loads(the_page)
    except Exception:
        print("Error reading data")
    return json_data
httphandler.py (project: lightbulb-framework, author: lightbulb-framework)
def __init__(self, configuration):
        self.setup(configuration)
        self.echo = None
        if "ECHO" in configuration:
            self.echo = configuration['ECHO']
        if self.proxy_scheme is not None and self.proxy_host is not None and \
                        self.proxy_port is not None:
            credentials = ""
            if self.proxy_username is not None and self.proxy_password is not None:
                credentials = self.proxy_username + ":" + self.proxy_password + "@"
            proxyDict = {
                self.proxy_scheme: self.proxy_scheme + "://" + credentials +
                                                    self.proxy_host + ":" + self.proxy_port
            }

            proxy = urllib2.ProxyHandler(proxyDict)

            if credentials != '':
                auth = urllib2.HTTPBasicAuthHandler()
                opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
            else:
                opener = urllib2.build_opener(proxy)
            urllib2.install_opener(opener)
Youdao-Anki.py (project: Anki-Youdao, author: megachweng)
def totalPage(self):

        self.loadedCookies = self.loadCookies()
        if not self.loadedCookies:
            return False
        # page index starts at 0 and ends at max - 1
        req = urllib2.Request('http://dict.youdao.com/wordbook/wordlist?p=0&tags=')
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.loadedCookies))
        urllib2.install_opener(opener)
        response = urllib2.urlopen(req)
        source = response.read()
        if '登录' in source:  # assumed login-page marker: cookies are no longer valid
            return False
        else:
            try:
                return int(re.search('<a href="wordlist.p=(.*).tags=" class="next-page">下一页</a>', source, re.M | re.I).group(1)) - 1
            except Exception:
                return 1
cas.py (project: etunexus_api, author: etusolution)
def _init_urllib(self, secure, debuglevel=0):
        cj = cookielib.CookieJar()
        no_proxy_support = urllib2.ProxyHandler({})
        cookie_handler = urllib2.HTTPCookieProcessor(cj)
        ctx = None
        if not secure:
            self._logger.info('[WARNING] Skip certificate verification.')
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
        https_handler = urllib2.HTTPSHandler(debuglevel=debuglevel, context=ctx)
        opener = urllib2.build_opener(no_proxy_support,
                                      cookie_handler,
                                      https_handler,
                                      MultipartPostHandler.MultipartPostHandler)
        opener.addheaders = [('User-agent', API_USER_AGENT)]
        urllib2.install_opener(opener)
poc_2014_0052.py (project: kekescan, author: xiaoxiaoleo)
def verify(cls, args):
        cookie = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
        urllib2.install_opener(opener)
        postdata = "_SESSION[login_in]=1&_SESSION[admin]=1&_SESSION[login_time]=300000000000000000000000\r\n"
        # get session
        request = urllib2.Request(args['options']['target'] + "/index.php", data=postdata)
        r = urllib2.urlopen(request)
        # login test
        request2 = urllib2.Request(args['options']['target'] + "/admin/admin.php", data=postdata)
        r = urllib2.urlopen(request2)
        content = r.read()
        if "admin_form.php?action=form_list&nav=list_order" in content:
            if "admin_main.php?nav=main" in content:
                args['success'] = True
                args['test_method'] = 'http://www.wooyun.org/bugs/wooyun-2014-059180'
                return args
        args['success'] = False
        return args
views.py (project: zacui, author: yoyopie)
def index(request):
    if request.method == "GET":
        try:
            ssl._create_default_https_context = ssl._create_unverified_context

            opener = wdf_urllib.build_opener(
                wdf_urllib.HTTPCookieProcessor(CookieJar()))
            wdf_urllib.install_opener(opener)
        except Exception:
            pass
        uuid = getUUID()
        url = 'https://login.weixin.qq.com/qrcode/' + uuid
        params = {
            't': 'webwx',
            '_': int(time.time()),
        }

        request = getRequest(url=url, data=urlencode(params))
        response = wdf_urllib.urlopen(request)
        context = {
            'uuid': uuid,
            'response': response.read(),
            'delyou': '',
        }
        return render_to_response('index.html', context)
wsse.py (project: TigerHost, author: naphatkrit)
def setup_wsse_handler(base_url, username, password, preempt = True):
  """
  Configure urllib2 to try/use WSSE authentication, with a specific
  `username` and `password` when visiting any page that have a given
  `base_url`. Once this function has been called, all future requests
  through urllib2 should be able to handle WSSE authentication.
  """

  # Create a password manager
  passman = urllib2.HTTPPasswordMgrWithDefaultRealm()

  # Add username/password for domain defined by base_url
  passman.add_password(None, base_url, username, password)

  # Create the auth handler and install it in urllib2
  authhandler = WSSEAuthHandler(passman, preempt = preempt)
  opener = urllib2.build_opener(authhandler)
  urllib2.install_opener(opener)
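
A hypothetical call site for the handler above (module path, endpoint URL,
and credentials are placeholder assumptions, not from the original project):

import urllib2
from wsse import setup_wsse_handler  # assumed import path

setup_wsse_handler('https://api.example.com', 'alice', 's3cret')
# Every later urllib2 request under that base URL carries WSSE credentials.
response = urllib2.urlopen('https://api.example.com/resource')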


keepalive.py (project: Eagle, author: magerx)
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", ' '.join(hosts)
    keepalive_handler.close_all()
keepalive.py (project: Eagle, author: magerx)
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
PayloadSender.py (project: Eagle, author: magerx)
def send_common_request(url, is_post, cookie, para=''):
    """
    Send a plain request to the target web server and return the response body.
    :url:       request URL
    :is_post:   request method flag (2 = POST, otherwise GET)
    :cookie:    cookie header value
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:42.0) Gecko/20100101 Firefox/42.0',
               'Cookie': cookie
               }
    # dns cache
    # socket.getaddrinfo = new_getaddrinfo

    try:
        encoding_support = ContentEncodingProcessor()
        opener = urllib2.build_opener(encoding_support, urllib2.HTTPHandler)
        urllib2.install_opener(opener)
        if is_post == 2:  # post
            # url, query = url.split('?', 1)
            return urllib2.urlopen(urllib2.Request(url, para, headers=headers)).read()
        else:
            return urllib2.urlopen(urllib2.Request('?'.join([url, para]), headers=headers)).read()
    except Exception:
        return ''
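
Hypothetical calls (URL, cookie value, and parameters are placeholders):

# POST 'id=1' to the target, sending a session cookie
body = send_common_request('http://example.com/page.php', 2, 'PHPSESSID=abc123', 'id=1')
# Any other is_post value appends the parameters to the URL as a query string
body = send_common_request('http://example.com/page.php', 0, 'PHPSESSID=abc123', 'id=1')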
keepalive.py (project: Helix, author: 3lackrush)
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
keepalive.py (project: Helix, author: 3lackrush)
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
keepalive.py (project: autoscan, author: b01u)
def error_handler(url):
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print "  fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            foo = fo.read()
            fo.close()
            try: status, reason = fo.status, fo.reason
            except AttributeError: status, reason = None, None
        except IOError, e:
            print "  EXCEPTION: %s" % e
            raise
        else:
            print "  status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", ' '.join(hosts)
    keepalive_handler.close_all()
keepalive.py (project: autoscan, author: b01u)
def comp(N, url):
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler       ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1/t2, )
httproxychecker.py (project: tools, author: okabe)
def checker():
    while True:
        if not proxyq.empty():
            proxy = "http://{}".format( proxyq.get() )
            url = "http://icanhazip.com"
            proxy_handler = urllib2.ProxyHandler( { "http" : proxy } )
            opener = urllib2.build_opener( proxy_handler )
            urllib2.install_opener( opener )
            printq.put( "[>] Trying {}".format( proxy ) )
            try:
                response = urllib2.urlopen( url, timeout=3 ).readlines()
                for line in response:
                    if line.rstrip( "\n" ) in proxy:
                        printq.put( "[+] Working proxy: {}".format( proxy ) )
                        with open( "working.txt", "a" ) as log:
                            log.write( "{}\n".format( proxy ) )
            except Exception:
                printq.put( "[!] Bad proxy: {}".format( proxy ) )
            proxyq.task_done()
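
checker() assumes module-level proxyq and printq queues. A hypothetical
driver (file name and thread count are placeholders; a real one would also
drain printq for output):

import threading
import Queue

proxyq = Queue.Queue()
printq = Queue.Queue()

# Load candidate proxies, then let a pool of workers drain the queue.
with open("proxies.txt") as f:
    for line in f:
        proxyq.put(line.strip())
for _ in range(10):
    worker = threading.Thread(target=checker)
    worker.daemon = True
    worker.start()
proxyq.join()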
sqlScan.py (project: 00scanner, author: xiaoqin00)
def init_options(proxy=None, cookie=None, ua=None, referer=None):
    globals()["_headers"] = dict(filter(lambda _: _[1], ((COOKIE, cookie), (UA, ua or NAME), (REFERER, referer))))
    urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler({'http': proxy})) if proxy else None)

# if __name__ == "__main__":
#     print "%s #v%s\n by: %s\n" % (NAME, VERSION, AUTHOR)
#     parser = optparse.OptionParser(version=VERSION)
#     parser.add_option("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.target.com/page.php?id=1\")")
#     parser.add_option("--data", dest="data", help="POST data (e.g. \"query=test\")")
#     parser.add_option("--cookie", dest="cookie", help="HTTP Cookie header value")
#     parser.add_option("--user-agent", dest="ua", help="HTTP User-Agent header value")
#     parser.add_option("--referer", dest="referer", help="HTTP Referer header value")
#     parser.add_option("--proxy", dest="proxy", help="HTTP proxy address (e.g. \"http://127.0.0.1:8080\")")
#     options, _ = parser.parse_args()
#     if options.url:
#         init_options(options.proxy, options.cookie, options.ua, options.referer)
#         result = scan_page(options.url if options.url.startswith("http") else "http://%s" % options.url, options.data)
#         print "\nscan results: %s vulnerabilities found" % ("possible" if result else "no")
#     else:
#         parser.print_help()
test_functional.py (project: mechanize, author: python-mechanize)
def setUp(self):
        mechanize._testcase.TestCase.setUp(self)
        self.test_uri = urljoin(self.uri, "test_fixtures")
        self.server = self.get_cached_fixture("server")
        if self.no_proxies:
            old_opener_m = mechanize._opener._opener
            old_opener_u = urllib2._opener
            mechanize.install_opener(mechanize.build_opener(
                mechanize.ProxyHandler(proxies={})))
            urllib2.install_opener(urllib2.build_opener(
                urllib2.ProxyHandler(proxies={})))

            def revert_install():
                mechanize.install_opener(old_opener_m)
                urllib2.install_opener(old_opener_u)
            self.add_teardown(revert_install)

