Python: example source code using the HTTPCookieProcessor() class

transport.py 文件源码 项目:touch-pay-client 作者: HackPucBemobi 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
        """Build a urllib2-based transport.

        :param timeout: socket timeout in seconds; only accepted when the
            subclass reports support via supports_feature('timeout').
        :param proxy: not supported by this transport; truthy -> RuntimeError.
        :param cacert: not supported by this transport; truthy -> RuntimeError.
        :param sessions: when True, persist cookies across requests via an
            HTTPCookieProcessor backed by a fresh CookieJar.
        :raises RuntimeError: for any unsupported option.
        """
        if (timeout is not None) and not self.supports_feature('timeout'):
            raise RuntimeError('timeout is not supported with urllib2 transport')
        if proxy:
            raise RuntimeError('proxy is not supported with urllib2 transport')
        if cacert:
            raise RuntimeError('cacert is not support with urllib2 transport')

        handlers = []

        # ssl.create_default_context() only exists on 2.7.9+ / 3.2+; on those
        # versions install an HTTPS handler with certificate checks disabled.
        if ((sys.version_info[0] == 2 and sys.version_info >= (2,7,9)) or
            (sys.version_info[0] == 3 and sys.version_info >= (3,2,0))):
            context = ssl.create_default_context()
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE  # NOTE(review): TLS verification is off
            handlers.append(urllib2.HTTPSHandler(context=context))

        if sessions:
            handlers.append(urllib2.HTTPCookieProcessor(CookieJar()))

        opener = urllib2.build_opener(*handlers)
        self.request_opener = opener.open
        self._timeout = timeout
scholar.py 文件源码 项目:google_scholar_paper_finder 作者: maikelronnau 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def __init__(self):
        """Initialize the querier: empty article list, no current query, and
        a URL opener whose cookies persist in a MozillaCookieJar (loaded from
        ScholarConf.COOKIE_JAR_FILE when that file exists)."""
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
scholar.py 文件源码 项目:citations 作者: frederick0329 项目源码 文件源码 阅读 30 收藏 0 点赞 0 评论 0
def __init__(self):
        """Set up the scholar querier.

        Cookies live in a MozillaCookieJar; if a cookie file is configured
        and present on disk it is loaded, and a fresh jar replaces it on a
        load failure.
        """
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        jar_file = ScholarConf.COOKIE_JAR_FILE
        if jar_file and os.path.exists(jar_file):
            try:
                self.cjar.load(jar_file, ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar()  # start clean on failure

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None  # last settings object, if any
spider.py 文件源码 项目:CN_POI_Data 作者: lyBigdata 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def __init__(self,proxyHost = ""):
        """Prepare the request headers, an empty cookie processor, the request
        timeout and the proxy mapping used by this spider.

        :param proxyHost: "host:port" of an HTTP proxy; empty string means
            the {"http": ""} mapping is still built — presumably callers pass
            a real proxy; TODO confirm.
        """
        # Default headers in dict form (for urllib2.Request-style usage).
        self.headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36',
                        'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
                        'Accept-Encoding':'en-us',
                        'Connection':'keep-alive',
                        'Referer':'http://www.baidu.com/'}

        # Same headers as a list of tuples (for opener.addheaders-style usage).
        self.proxyHeaders = [('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36'),
                             ('Accept-Charset','ISO-8859-1,utf-8;q=0.7,*;q=0.3'),
                             ('Accept-Encoding','en-us'),
                             ('Connection','keep-alive'),
                             ('Referer','http://www.baidu.com/')]

        # Cookie processor with its own in-memory jar.
        self.cookies = urllib2.HTTPCookieProcessor()
        # Request timeout in seconds.
        self.req_timeout = 5

        self.proxyHost = {"http":proxyHost}
proxy.py 文件源码 项目:CN_POI_Data 作者: lyBigdata 项目源码 文件源码 阅读 31 收藏 0 点赞 0 评论 0
def checkAlive(self,ip,port,protocol):
        testUrl = "https://www.baidu.com/"
        req_timeout = 3
        cookies = urllib2.HTTPCookieProcessor()

        proxyHost = ""
        if protocol == 'HTTP' or protocol == 'HTTPS':
            proxyHost = {"http":r'http://%s:%s' % (ip, port)}
            #print proxyHost

        proxyHandler = urllib2.ProxyHandler(proxyHost)
        opener = urllib2.build_opener(cookies, proxyHandler)
        opener.addheaders = [('User-Agent',
                              'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]

        try:
            req = opener.open(testUrl, timeout=req_timeout)
            result = req.read()
            #print result
            gevent.sleep(2)
            return  True
        except urllib2.HTTPError as e:
            print  e.message
            return False
is_google_connect.py 文件源码 项目:auto-laod-hosts 作者: yanjinyi1987 项目源码 文件源码 阅读 40 收藏 0 点赞 0 评论 0
def urlopen_test(host):
    """Return True if https://<host> answers with a readable body within 60s,
    False (after printing the error) otherwise."""
    headers = [('Host',host),
    ('Connection', 'keep-alive'),
    ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
    ('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'),
    #('Accept-Encoding','gzip,deflate'), 
    ('Accept-Language', 'en-US,en;q=0.5')]

    # Use a MozillaCookieJar-backed handler so cookies are kept for the request.
    cookie=cookielib.MozillaCookieJar()
    handler=urllib2.HTTPCookieProcessor(cookie)

    req=urllib2.Request(u'https://'+host)
    first_opener = urllib2.build_opener(handler)
    first_opener.addheaders = headers
    try:
        result=first_opener.open(req,timeout=60) # 60-second timeout
        if result.read()!=None:
            return True
    except Exception,e:
        print e
        return False
index_spider.py 文件源码 项目:wechat-crawler 作者: DMGbupt 项目源码 文件源码 阅读 36 收藏 0 点赞 0 评论 0
def get_cookie(self,query):
        """
        @summary: fetch the search cookies needed for crawling
        @param query: the query keyword to search with
        @return: dict of cookie name -> value (only the SNUID cookie; empty
            dict if it could not be obtained after several attempts)
        """
        cookies={}
        i=0
        while True:
            cookie = cookielib.CookieJar()
            handler=urllib2.HTTPCookieProcessor(cookie)
            opener = urllib2.build_opener(handler)
            response = opener.open(self._search_url.format(query)) # open the search page so the server sets cookies
            for item in cookie:
                # Of all cookies returned, only SNUID matters for the crawl;
                # once found, return it immediately.
                if("SNUID" in item.name):
                    cookies[item.name]=item.value
                    return cookies
            if(i>3):
                # After more than 3 attempts without the cookie (possibly an
                # IP restriction), give up and return what we have (empty).
                spider_logger.error("Can't get cookies when searching {0} !".format(query))
                return cookies
            i=i+1
            time.sleep(10*random.expovariate(1)) # randomized back-off between attempts to look less bot-like
captcha_handler.py 文件源码 项目:PySide_For_Amazon_Order 作者: cundi 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def __init__(self, user, pwd, softId="110614",
                 softKey="469c0d8a805a40f39d3c1ec3c9281e9c",
                 codeType="1004"):
        """Set up the captcha-service client.

        :param user: service account name.
        :param pwd: service account password.
        :param softId: software id registered with the service.
        :param softKey: software key registered with the service.
        :param codeType: captcha type code understood by the service.

        Builds a cookie-aware urllib2 opener; the service URLs
        (loginUrl/uploadUrl/codeUrl) are filled in later — presumably after
        fetching the server config from initUrl; TODO confirm.
        """
        self.softId = softId
        self.softKey = softKey
        self.user = user
        self.pwd = pwd
        self.codeType = codeType
        self.uid = "100"
        self.initUrl = "http://common.taskok.com:9000/Service/ServerConfig.aspx"
        self.version = '1.1.1.2'
        self.cookieJar = cookielib.CookieJar()
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cookieJar))
        self.loginUrl = None
        self.uploadUrl = None
        self.codeUrl = None
        self.params = []
        self.uKey = None
__Threading.py 文件源码 项目:-scrapy- 作者: PyCN 项目源码 文件源码 阅读 76 收藏 0 点赞 0 评论 0
def get_url(self, url, headers, cookie):
        """GET *url* with *headers*, optionally through a cookie-aware opener.

        :param url: URL string to fetch.
        :param headers: header dict passed to urllib2.Request.
        :param cookie: None (no cookie handling), a cookielib.CookieJar to
            use, or any other value — which is replaced by a fresh CookieJar.
        :return: (page_body, cookie) where cookie is the jar actually used
            (still None when no jar was involved).
        :raises TypeError: if url is not a string.
        :raises urllib2.HTTPError: on an HTTP error response.
        """
        if not isinstance(url, str):
            # Bug fix: `raise 'string'` is illegal since Python 2.6 and
            # produced an unrelated TypeError; raise a real exception.
            raise TypeError('url must be a string')
        req = urllib2.Request(url, None, headers)
        if cookie is None:
            response = urllib2.urlopen(req)
        else:
            if not isinstance(cookie, cookielib.CookieJar):
                # Non-jar, non-None value: fall back to a fresh jar,
                # matching the original behavior.
                cookie = cookielib.CookieJar()
            handler = urllib2.HTTPCookieProcessor(cookie)
            opener = urllib2.build_opener(handler)
            response = opener.open(req)
        # Bug fix: the old `except HTTPError: raise 'get url error!!'` masked
        # the real error with a TypeError; let HTTPError propagate instead.
        the_page = response.read()

        return the_page, cookie
__Threading.py 文件源码 项目:-scrapy- 作者: PyCN 项目源码 文件源码 阅读 41 收藏 0 点赞 0 评论 0
def post_url(self, url, formdata, headers, cookie):
        """POST *formdata* to *url* with *headers*, optionally via a cookie
        jar.

        :param url: URL string to post to.
        :param formdata: dict of form fields, urlencoded into the body.
        :param headers: header dict passed to urllib2.Request.
        :param cookie: None (no cookie handling), a cookielib.CookieJar to
            use, or any other value — which is replaced by a fresh CookieJar.
        :return: (page_body, cookie) where cookie is the jar actually used.
        :raises TypeError: if url is not a string.
        :raises urllib2.HTTPError: on an HTTP error response.
        """
        if not isinstance(url, str):
            # Bug fix: `raise 'string'` is illegal since Python 2.6; raise a
            # real exception type (and fix the 'fordata' typo in the message).
            raise TypeError('url must be a string and formdata must be a dict')
        data = urllib.urlencode(formdata)
        req = urllib2.Request(url, data, headers)
        if cookie is None:
            response = urllib2.urlopen(req)
        else:
            if not isinstance(cookie, cookielib.CookieJar):
                # Non-jar, non-None value: fall back to a fresh jar,
                # matching the original behavior.
                cookie = cookielib.CookieJar()
            handler = urllib2.HTTPCookieProcessor(cookie)
            opener = urllib2.build_opener(handler)
            response = opener.open(req)
        # Bug fix: the old string re-raise masked the real HTTPError; let it
        # propagate to the caller instead.
        the_page = response.read()

        return the_page, cookie
transport.py 文件源码 项目:rekall-agent-server 作者: rekall-innovations 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
        """Create the urllib2 transport.

        Only ``timeout`` (when the transport supports it) and ``sessions``
        are honoured; ``proxy`` and ``cacert`` are rejected outright.
        """
        if (timeout is not None) and not self.supports_feature('timeout'):
            raise RuntimeError('timeout is not supported with urllib2 transport')
        if proxy:
            raise RuntimeError('proxy is not supported with urllib2 transport')
        if cacert:
            raise RuntimeError('cacert is not support with urllib2 transport')

        handlers = []

        # ssl.create_default_context() appeared in 2.7.9 / 3.2.
        major = sys.version_info[0]
        has_ssl_context = ((major == 2 and sys.version_info >= (2, 7, 9)) or
                           (major == 3 and sys.version_info >= (3, 2, 0)))
        if has_ssl_context:
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            handlers.append(urllib2.HTTPSHandler(context=ctx))

        if sessions:
            handlers.append(urllib2.HTTPCookieProcessor(CookieJar()))

        opener = urllib2.build_opener(*handlers)
        self.request_opener = opener.open
        self._timeout = timeout
scholar.py 文件源码 项目:snowballing 作者: JoaoFelipe 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def __init__(self):
        """Create the querier with no articles/query and a cookie-persisting
        opener; cookies come from ScholarConf.COOKIE_JAR_FILE when present."""
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
cas.py 文件源码 项目:etunexus_api 作者: etusolution 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def _init_urllib(self, secure, debuglevel=0):
        """Build and globally install a urllib2 opener for the API session.

        The opener combines: an empty ProxyHandler (bypasses environment
        proxies), a cookie processor with a fresh jar, an HTTPS handler —
        with certificate verification disabled when *secure* is falsy — and
        the project's MultipartPostHandler. A User-agent header is attached.

        :param secure: when falsy, skip TLS certificate/hostname checks.
        :param debuglevel: passed to urllib2.HTTPSHandler for wire debugging.
        """
        cj = cookielib.CookieJar()
        no_proxy_support = urllib2.ProxyHandler({})
        cookie_handler = urllib2.HTTPCookieProcessor(cj)
        ctx = None  # None -> HTTPSHandler uses default (verifying) behavior
        if not secure:
            self._logger.info('[WARNING] Skip certificate verification.')
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
        https_handler = urllib2.HTTPSHandler(debuglevel=debuglevel, context=ctx)
        opener = urllib2.build_opener(no_proxy_support,
                                      cookie_handler,
                                      https_handler,
                                      MultipartPostHandler.MultipartPostHandler)
        opener.addheaders = [('User-agent', API_USER_AGENT)]
        # Installed globally: affects every urllib2.urlopen in the process.
        urllib2.install_opener(opener)
scholar_api.py 文件源码 项目:KDDCUP2016 作者: hugochan 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def __init__(self):
        """Initialize the scholar API client: empty results, no query, and a
        cookie-backed opener (jar loaded from ScholarConf.COOKIE_JAR_FILE
        when the file exists; replaced by a fresh jar on load failure)."""
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
views.py 文件源码 项目:zacui 作者: yoyopie 项目源码 文件源码 阅读 26 收藏 0 点赞 0 评论 0
def index(request):
    """Django view: on GET, install a cookie-aware opener, fetch the WeChat
    web-login QR code for a fresh uuid, and render index.html with the uuid
    and the raw QR-image bytes."""
    if request.method == "GET":
        try:
            # Globally disable HTTPS certificate verification, then install a
            # cookie-persisting opener for all wdf_urllib requests.
            ssl._create_default_https_context = ssl._create_unverified_context

            opener = wdf_urllib.build_opener(
                wdf_urllib.HTTPCookieProcessor(CookieJar()))
            wdf_urllib.install_opener(opener)
        except:
            # Best-effort: older runtimes without these hooks just proceed.
            pass
        uuid = getUUID()
        url = 'https://login.weixin.qq.com/qrcode/' + uuid
        params = {
            't': 'webwx',
            '_': int(time.time()),
        }

        # NOTE(review): this rebinds the view's `request` parameter to the
        # outgoing HTTP request object — confusing but harmless here.
        request = getRequest(url=url, data=urlencode(params))
        response = wdf_urllib.urlopen(request)
        context = {
            'uuid': uuid,
            'response': response.read(),
            'delyou': '',
            }
        return render_to_response('index.html', context)
scholar.py 文件源码 项目:slack_scholar 作者: xLeitix 项目源码 文件源码 阅读 30 收藏 0 点赞 0 评论 0
def __init__(self):
        """Create the querier; cookies persist in a MozillaCookieJar loaded
        from ScholarConf.COOKIE_JAR_FILE when available."""
        self.articles = []
        self.query = None
        self.cjar = MozillaCookieJar()

        # If we have a cookie file, load it:
        if ScholarConf.COOKIE_JAR_FILE and \
           os.path.exists(ScholarConf.COOKIE_JAR_FILE):
            try:
                self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                               ignore_discard=True)
                print "Using cookie file"
                ScholarUtils.log('info', 'loaded cookies file')
            except Exception as msg:
                print "Ignoring cookie file: %s" % msg
                ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
                self.cjar = MozillaCookieJar() # Just to be safe

        self.opener = build_opener(HTTPCookieProcessor(self.cjar))
        self.settings = None # Last settings object, if any
cookie_util.py 文件源码 项目:sogouWechart 作者: duanbj 项目源码 文件源码 阅读 34 收藏 0 点赞 0 评论 0
def proxy_identify(proxy, url):
        """Return True if *url* is reachable (HTTP 200) through *proxy* within
        3 seconds; on success the received cookies are serialized into a
        "name=value;..." string and appended to IpProxy.sogou_cookie.

        :param proxy: "host:port" string for an HTTP proxy.
        :param url: URL to probe through the proxy.
        """
        cookie = cookielib.LWPCookieJar()
        handler = urllib2.HTTPCookieProcessor(cookie)
        proxy_support = urllib2.ProxyHandler({'http': proxy})
        opener = urllib2.build_opener(proxy_support, handler)
        try:
            response = opener.open(url, timeout=3)
            if response.code == 200:
                c = ''
                for item in cookie:
                    c += item.name+'='+item.value+';'
                print c
                IpProxy.sogou_cookie.append(c)
                return True
        except Exception, error:
            print error
            return False
google.py 文件源码 项目:autoscan 作者: b01u 项目源码 文件源码 阅读 36 收藏 0 点赞 0 评论 0
def __init__(self, handlers):
        """Build a Google-search opener from *handlers* plus cookie and HTTPS
        handlers, then prime the session cookie by hitting google.com/ncr.

        :param handlers: list of urllib2 handlers; NOTE: mutated in place
            (cookie and HTTPS handlers are appended to the caller's list).
        :raises SqlmapConnectionException: when Google is unreachable.
        """
        self._cj = cookielib.CookieJar()

        handlers.append(urllib2.HTTPCookieProcessor(self._cj))
        handlers.append(HTTPSHandler())

        self.opener = urllib2.build_opener(*handlers)
        self.opener.addheaders = conf.httpHeaders

        try:
            conn = self.opener.open("http://www.google.com/ncr")
            conn.info()  # retrieve session cookie
        except urllib2.HTTPError, e:
            # HTTP-level errors still carry headers; read them and continue.
            e.info()
        except urllib2.URLError:
            errMsg = "unable to connect to Google"
            raise SqlmapConnectionException(errMsg)
sendsms.py 文件源码 项目:awesome-hacking-via-python 作者: shashi12533 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def login(uname,passwd):
    """Log in to the SMS site with *uname*/*passwd*.

    Posts the credentials to the configured login-check URL through a fresh
    cookie-aware opener (stored in the module global ``o``). On success the
    session cookie is copied into the module-level ``filecookiejar`` and
    saved to disk; returns True on success, False otherwise.
    """
    global logging
    global o
    global confget
    global filecookiejar
    logging.debug("Logging using url: %s" % confget('Auth','logincheck'))
    login_encode=urllib.urlencode({'MobileNoLogin':uname, 'LoginPassword':passwd})
    logging.debug("login_encode:%s" % login_encode)
    cookieprocessor=urllib2.HTTPCookieProcessor() #new cookie processor
    o = urllib2.build_opener(cookieprocessor) # a new urlopener
    f = tryopen(o,confget('Auth','logincheck'),login_encode)
    logging.debug("Sent Login information, got the following return URL: %s", f.geturl())
    # Success is detected by finding the configured marker in the response body.
    if f.read().find(confget('Auth','logindone')) != -1:
        #save cookies
        cj=cookieprocessor.cookiejar
        cookie=enumerate(cj).next()[1]  # first cookie in the jar
        logging.debug("New Cookie:%s:" % cookie)
        filecookiejar.set_cookie(cookie)
        filecookiejar.save(ignore_discard=True)
        logging.debug("Cookies saved in %s" % filecookiejar.filename)
        return True
    else:
        return False
sitecopy.py 文件源码 项目:TinyStockToolKit 作者: lawinse 项目源码 文件源码 阅读 34 收藏 0 点赞 0 评论 0
def __init__(self,timeout=10,threads=None,stacksize=32768*16,loginfunc=None):
        """Set up the fetcher: cookie+content-encoding opener, default socket
        timeout, work queues, and an optional pool of daemon worker threads.

        :param timeout: default socket timeout in seconds (set process-wide).
        :param threads: number of worker threads to start; None starts none.
        :param stacksize: per-thread stack size passed to thread.stack_size.
        :param loginfunc: optional callable that takes the opener and returns
            a (possibly logged-in) replacement opener.
        """
        #proxy_support = urllib2.ProxyHandler({'http':'http://localhost:3128'})
        cookie_support = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        encoding_support = ContentEncodingProcessor()
        #self.opener = urllib2.build_opener(cookie_support,encoding_support,proxy_support,urllib2.HTTPHandler)
        self.opener = urllib2.build_opener(cookie_support,encoding_support,urllib2.HTTPHandler)
        self.req = urllib2.Request('http://www.hsbc.com')
        socket.setdefaulttimeout(timeout)  # affects all sockets in the process
        self.q_req = Queue()  # pending requests
        self.q_ans = Queue()  # completed responses
        self.lock = Lock()
        self.running = 0
        if loginfunc:
            self.opener = loginfunc(self.opener)
        if threads:
            self.threads = threads
            stack_size(stacksize)
            for i in range(threads):
                t = Thread(target=self.threadget)
                t.setDaemon(True)  # don't block interpreter exit
                t.start()
douban.py 文件源码 项目:DoubanSpider 作者: ruiming 项目源码 文件源码 阅读 25 收藏 0 点赞 0 评论 0
def __init__(self):
        """Configure the Douban book spider: proxy, target URL, browser-like
        headers, and a cookie-aware opener."""
        # Proxy: pick the 4th entry from the module-level proxy list.
        self.proxy_url = proxyList[3]
        self.proxy = urllib2.ProxyHandler({"http": self.proxy_url})
        # Crawl entry point.
        self.hostURL = 'http://book.douban.com/tag/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.47 (KHTML, like Gecko)'
                          ' Chrome/48.1.2524.116 Safari/537.36',
            'Referer': 'http://book.douban.com/',
            'Host': 'book.douban.com',
            'Upgrade-Insecure-Requests': '1',
            'Connection': 'keep-alive'
        }
        # Opener: cookie jar + proxy + plain HTTP handler.
        self.cookie = cookielib.LWPCookieJar()
        self.cookieHandler = urllib2.HTTPCookieProcessor(self.cookie)
        self.opener = urllib2.build_opener(self.cookieHandler, self.proxy, urllib2.HTTPHandler)

    # ????????????
proxy.py 文件源码 项目:DoubanSpider 作者: ruiming 项目源码 文件源码 阅读 37 收藏 0 点赞 0 评论 0
def checkproxy(self):
        cookies = urllib2.HTTPCookieProcessor()
        for proxy in self.proxyList:
            proxyhandler = urllib2.ProxyHandler({"http": r'http://%s:%s' % (proxy[0], proxy[1])})
            opener = urllib2.build_opener(cookies, proxyhandler)
            opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                                                '(KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36')]
            opener.addheaders = [('Referer', 'http://proxy.moo.jp/zh/')]
            t1 = time.time()
            try:
                request = opener.open(self.testURL, timeout=self.timeout)
                result = request.read()
                timeused = time.time() - t1
                pos = result.find(self.testStr)
                if pos > 1:
                    print r'success --http://%s:%s' % (proxy[0], proxy[1])
                    checkedProxyList.append((proxy[0], proxy[1]))
                else:
                    print r'fail    --http://%s:%s' % (proxy[0], proxy[1])
                    continue
            except Exception, e:
                print r'fail    --http://%s:%s' % (proxy[0], proxy[1])
                continue
http_client.py 文件源码 项目:clusterdock 作者: cloudera 项目源码 文件源码 阅读 30 收藏 0 点赞 0 评论 0
def __init__(self, base_url, exc_class=None, logger=None):
    """
    @param base_url: The base url to the API.
    @param exc_class: An exception class to handle non-200 results.
    @param logger: Optional logger; defaults to the module-level LOG.

    Creates an HTTP(S) client to connect to the Cloudera Manager API.
    The opener chains an error processor, a cookie processor, and a basic
    auth handler whose password manager is filled in later.
    """
    self._base_url = base_url.rstrip('/')
    self._exc_class = exc_class or RestException
    self._logger = logger or LOG
    self._headers = { }

    # Make a basic auth handler that does nothing. Set credentials later.
    self._passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    authhandler = urllib2.HTTPBasicAuthHandler(self._passmgr)

    # Make a cookie processor
    cookiejar = cookielib.CookieJar()

    self._opener = urllib2.build_opener(
        HTTPErrorProcessor(),
        urllib2.HTTPCookieProcessor(cookiejar),
        authhandler)
__init__.py 文件源码 项目:antares 作者: CONABIO 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def download_landsat_scene(url, directory, filename):
    '''
    This method downloads a scene directly from usgs. In order to do so, it
    pretends to be a browser to build a request that is accepted by the server.
    We added the headers so we don't get banned when the server detects that we
    are doing lots of requests. This idea is based on the landsat downloader:
    https://github.com/olivierhagolle/LANDSAT-Download

    Flow: install a cookie-aware global opener, scrape the CSRF token from
    the USGS ERS login page, post the configured credentials, then hand the
    actual transfer to download_chunks(). Exits the process (status 1) if
    the CSRF token cannot be found.
    '''
    cookies = urllib2.HTTPCookieProcessor()
    opener = urllib2.build_opener(cookies)
    urllib2.install_opener(opener)
    data=urllib2.urlopen("https://ers.cr.usgs.gov").read()
    # The login form embeds a csrf_token input; logging in requires it.
    token_group = re.search(r'<input .*?name="csrf_token".*?value="(.*?)"', data)
    if token_group:
        token = token_group.group(1)
    else:
        LOGGER.error('The cross site request forgery token was not found.')
        sys.exit(1)
    usgs = {'account':getattr(SETTINGS, 'USGS_USER'), 'passwd':getattr(SETTINGS, 'USGS_PASSWORD')}
    params = urllib.urlencode(dict(username=usgs['account'], password=usgs['passwd'], csrf_token=token))
    request = urllib2.Request("https://ers.cr.usgs.gov/login", params, headers={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'})
    f = urllib2.urlopen(request)
    data = f.read()
    f.close()    
    download_chunks(url, directory, filename)
recipe-578681.py 文件源码 项目:code 作者: ActiveState 项目源码 文件源码 阅读 38 收藏 0 点赞 0 评论 0
def get_file(self, url, quality):
        """POST *url* and *quality* to convertmemp3.com and store the
        resulting HTML in self.convhtml.

        :param url: YouTube URL to convert.
        :param quality: quality value understood by the conversion form.
        """
        self.cookieJar = cookielib.LWPCookieJar()

        self.opener = urllib2.build_opener(

            urllib2.HTTPCookieProcessor(self.cookieJar),
            urllib2.HTTPRedirectHandler(),
            urllib2.HTTPHandler(debuglevel=0))

        self.opener.addheaders = [('User-agent', "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36")]


        forms = {"youtubeURL": url,
                 'quality':quality

                 }

        data = urllib.urlencode(forms)
        req = urllib2.Request('http://www.convertmemp3.com/',data)
        res = self.opener.open(req)

        self.convhtml = res.read()
downloader.py 文件源码 项目:transfer 作者: viur-framework 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def __init__(self, baseURL):
        """Create the service and install a cookie-aware opener as the
        process-wide urllib2 opener."""
        super(NetworkService, self).__init__()
        self.baseURL = baseURL
        cookie_handler = urllib2.HTTPCookieProcessor()
        self.opener = urllib2.build_opener(cookie_handler)
        urllib2.install_opener(self.opener)
viur.py 文件源码 项目:transfer 作者: viur-framework 项目源码 文件源码 阅读 23 收藏 0 点赞 0 评论 0
def __init__( self,  baseURL ):
        """Create the service and install a cookie-aware opener as the
        process-wide urllib2 opener."""
        super( NetworkService, self ).__init__()
        self.baseURL = baseURL
        cp = urllib2.HTTPCookieProcessor()
        self.opener = urllib2.build_opener( cp )
        urllib2.install_opener( self.opener )
option.py 文件源码 项目:darkc0de-old-stuff 作者: tuwid 项目源码 文件源码 阅读 35 收藏 0 点赞 0 评论 0
def __urllib2Opener():
    """
    This function creates the urllib2 OpenerDirector.

    Combines the module's proxy and auth handlers with a cookie processor
    (jar stored in conf.cj) and installs the result as the process-wide
    urllib2 opener.
    """

    global authHandler
    global proxyHandler

    debugMsg = "creating HTTP requests opener object"
    logger.debug(debugMsg)

    conf.cj = cookielib.LWPCookieJar()
    opener  = urllib2.build_opener(proxyHandler, authHandler, urllib2.HTTPCookieProcessor(conf.cj))

    urllib2.install_opener(opener)
google.py 文件源码 项目:darkc0de-old-stuff 作者: tuwid 项目源码 文件源码 阅读 30 收藏 0 点赞 0 评论 0
def __init__(self, proxyHandler):
        """Create the Google-search helper: empty match state and an opener
        built from the given proxy handler plus a cookie processor backed by
        a private LWPCookieJar; headers come from conf.httpHeaders."""
        self.__googleCookie = None
        self.__matches = []
        self.__cj = cookielib.LWPCookieJar()
        self.opener = urllib2.build_opener(proxyHandler, urllib2.HTTPCookieProcessor(self.__cj))
        self.opener.addheaders = conf.httpHeaders
login.py 文件源码 项目:weibo 作者: windskyer 项目源码 文件源码 阅读 62 收藏 0 点赞 0 评论 0
def login(self, username, pwd, cookie_file):
        """
        Login with username, password and cookies.
        (1) If the cookie file exists, try to load and install its cookies
            (returns 1 on success);
        (2) otherwise — or if loading fails — fall back to do_login().
        """
        # If cookie file exists then try to load cookies
        if os.path.exists(cookie_file):
            try:
                cookie_jar = cookielib.LWPCookieJar(cookie_file)
                cookie_jar.load(ignore_discard=True, ignore_expires=True)
                loaded = 1
            except cookielib.LoadError:
                loaded = 0
                LOG.info('Loading cookies error')

            # install loaded cookies for urllib2
            if loaded:
                cookie_support = urllib2.HTTPCookieProcessor(cookie_jar)
                opener = urllib2.build_opener(cookie_support,
                                              urllib2.HTTPHandler)
                urllib2.install_opener(opener)
                LOG.info('Loading cookies success')
                return 1
            else:
                return self.do_login(username, pwd, cookie_file)

        else:  # If no cookies found
            return self.do_login(username, pwd, cookie_file)


Questions


Interview experiences


Articles

WeChat
Official account

Scan the QR code to follow the official account