Python GET usage examples (source code)

twitter.py (source file from project naziscore, author rbanffy)
def get_timeline_by_screen_name(screen_name):
    """Returns a dict from the Twitter GET statuses/user_timeline API.

    See https://dev.twitter.com/rest/reference/get/statuses/user_timeline"""
    return authenticated_get(
        'https://api.twitter.com/1.1/statuses/user_timeline.json?count=90&'
        'screen_name={}'.format(
            screen_name))
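The authenticated_get helper is not included in this snippet. A minimal sketch of what it might look like on App Engine, assuming an application-only OAuth 2.0 bearer token held in a hypothetical BEARER_TOKEN constant:

import json

from google.appengine.api import urlfetch

BEARER_TOKEN = 'replace-with-app-only-bearer-token'  # hypothetical constant, not from the original project


def authenticated_get(url):
    # Sketch only: GET the URL with a bearer token and return the decoded JSON body.
    result = urlfetch.fetch(
        url=url,
        method=urlfetch.GET,
        headers={'Authorization': 'Bearer ' + BEARER_TOKEN},
        validate_certificate=True)
    if result.status_code != 200:
        raise RuntimeError('Twitter API returned status %s' % result.status_code)
    return json.loads(result.content)
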
__init__.py (source file from project helios-server-mixnet, author RunasSudo)
def get_url(self, page, **args):
        """
        Returns one of the Facebook URLs (www.facebook.com/SOMEPAGE.php).
        Named arguments are passed as GET query string parameters.

        """
        return 'http://www.facebook.com/%s.php?%s' % (page, urllib.urlencode(args))
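For reference, the same query-string construction as a standalone sketch (Python 2, matching the original module's use of urllib.urlencode; page name and arguments are placeholders):

import urllib

page, args = 'profile', {'id': 4}
url = 'http://www.facebook.com/%s.php?%s' % (page, urllib.urlencode(args))
# url is now 'http://www.facebook.com/profile.php?id=4'
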
pocket.py (source file from project flow-dashboard, author onejgordon)
def update_article(access_token, item_id, action='favorite'):
    '''
    Favorite or archive (mark read) an article
    '''
    actions = json.dumps(
        [
            {
                "action": action,
                "item_id": item_id,
                "time": str(int(tools.unixtime(ms=False)))
            }
        ]
    )
    data = urllib.urlencode({
        'access_token': access_token,
        'consumer_key': POCKET_CONSUMER_KEY,
        'actions': actions
    })
    logging.debug(data)
    res = urlfetch.fetch(
        url=MODIFY_ENDPOINT + "?" + data,
        method=urlfetch.GET,
        validate_certificate=True)
    logging.debug(res.content)
    if res.status_code == 200:
        result = json.loads(res.content)
        ok = result.get('status', 0) == 1
        return ok
    else:
        logging.debug(res.headers)
    return False
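A usage sketch; the access token and item id below are placeholders, and MODIFY_ENDPOINT is assumed to point at Pocket's v3 send endpoint as the module presumably defines:

# Hypothetical values, for illustration only.
ok = update_article('pocket-access-token', '229279689', action='favorite')
archived = update_article('pocket-access-token', '229279689', action='archive')  # mark as read
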
tools.py (source file from project StuffShare, author StuffShare)
def fetch(url, data=None, headers=None,
          cookie=Cookie.SimpleCookie(),
          user_agent='Mozilla/5.0'):
    headers = headers or {}
    if data is not None:
        data = urllib.urlencode(data)
    if user_agent:
        headers['User-agent'] = user_agent
    headers['Cookie'] = ' '.join(
        ['%s=%s;' % (c.key, c.value) for c in cookie.values()])
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        req = urllib2.Request(url, data, headers)
        html = urllib2.urlopen(req).read()
    else:
        method = ((data is None) and urlfetch.GET) or urlfetch.POST
        while url is not None:
            response = urlfetch.fetch(url=url, payload=data,
                                      method=method, headers=headers,
                                      allow_truncated=False, follow_redirects=False,
                                      deadline=10)
            # next request will be a get, so no need to send the data again
            data = None
            method = urlfetch.GET
            # load cookies from the response
            cookie.load(response.headers.get('set-cookie', ''))
            url = response.headers.get('location')
        html = response.content
    return html
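A usage sketch of this helper with a hypothetical URL; passing a data dict switches the request from GET to POST, and cookies set by the server accumulate in the jar:

import Cookie

jar = Cookie.SimpleCookie()

# Plain GET.
html = fetch('http://www.example.com/', cookie=jar)

# POST, because data is not None.
html = fetch('http://www.example.com/login',
             data={'username': 'alice', 'password': 'secret'},
             cookie=jar)
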
urlfetch.py (source file from project GAMADV-XTD, author taers232c)
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [__ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers))
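Beyond the GET shown in the docstring, a hedged usage sketch for a POST (the feed URL and payload are placeholders):

import atom.http

client = atom.http.HttpClient()
entry_xml = '<entry xmlns="http://www.w3.org/2005/Atom">...</entry>'  # placeholder payload
# Content-Type falls back to application/atom+xml when not supplied.
http_response = client.request('POST', 'http://www.example.com/feed', data=entry_xml)
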
tools.py (source file from project touch-pay-client, author HackPucBemobi)
def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read, db.wiki_page.can_edit]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.can_manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            pages = db(query).select(count,
                                     *fields, **dict(orderby=orderby or ~count,
                                                     groupby=reduce(lambda a, b: a | b, fields),
                                                     distinct=True,
                                                     limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                    args=p.wiki_page.slug))),
                    MARKMIN(self.first_paragraph(p.wiki_page))
                    if preview else '',
                    DIV(_class='w2p_wiki_tags',
                        *[link(t.strip()) for t in
                          p.wiki_page.tags or [] if t.strip()]),
                    _class='w2p_wiki_search_item')
                    for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)
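A hedged usage sketch of this web2py Wiki.search method, assuming auth is the Auth instance from the standard scaffolding model and that constructing Wiki(auth) with defaults is acceptable for the application:

from gluon.tools import Wiki

wiki = Wiki(auth)  # assumption: `auth` comes from the web2py scaffolding model
# Tag search; returns dict(content=...) for html/load requests
# and a list of page dicts for other extensions such as .json.
result = wiki.search(tags=['gae', 'python'], preview=False, limitby=(0, 20))
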
tools.py (source file from project true_review_web2py, author lucadealfaro)
def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read, db.wiki_page.can_edit]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            pages = db(query).select(count,
                                     *fields, **dict(orderby=orderby or ~count,
                                                     groupby=reduce(lambda a, b: a | b, fields),
                                                     distinct=True,
                                                     limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                                    args=p.wiki_page.slug))),
                             MARKMIN(self.first_paragraph(p.wiki_page))
                                 if preview else '',
                             DIV(_class='w2p_wiki_tags',
                                 *[link(t.strip()) for t in
                                       p.wiki_page.tags or [] if t.strip()]),
                             _class='w2p_wiki_search_item')
                         for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)
tools.py (source file from project Problematica-public, author TechMaz)
def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read, db.wiki_page.can_edit]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            pages = db(query).select(count,
                                     *fields, **dict(orderby=orderby or ~count,
                                                     groupby=reduce(lambda a, b: a | b, fields),
                                                     distinct=True,
                                                     limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                                    args=p.wiki_page.slug))),
                             MARKMIN(self.first_paragraph(p.wiki_page))
                                 if preview else '',
                             DIV(_class='w2p_wiki_tags',
                                 *[link(t.strip()) for t in
                                       p.wiki_page.tags or [] if t.strip()]),
                             _class='w2p_wiki_search_item')
                         for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)
tools.py (source file from project rekall-agent-server, author rekall-innovations)
def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read, db.wiki_page.can_edit]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            pages = db(query).select(count,
                                     *fields, **dict(orderby=orderby or ~count,
                                                     groupby=reduce(lambda a, b: a | b, fields),
                                                     distinct=True,
                                                     limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                                    args=p.wiki_page.slug))),
                             MARKMIN(self.first_paragraph(p.wiki_page))
                                 if preview else '',
                             DIV(_class='w2p_wiki_tags',
                                 *[link(t.strip()) for t in
                                       p.wiki_page.tags or [] if t.strip()]),
                             _class='w2p_wiki_search_item')
                         for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)
urlfetch.py (source file from project plugin.video.streamondemand-pureita, author orione7)
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [__ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers))
urlfetch.py (source file from project gdata-python3, author dvska)
def request(self, operation, url, data=None, headers=None):
        """Performs an HTTP call to the server, supports GET, POST, PUT, and
        DELETE.

        Usage example, perform an HTTP GET on http://www.google.com/:
          import atom.http
          client = atom.http.HttpClient()
          http_response = client.request('GET', 'http://www.google.com/')

        Args:
          operation: str The HTTP operation to be performed. This is usually one
              of 'GET', 'POST', 'PUT', or 'DELETE'
          data: filestream, list of parts, or other object which can be converted
              to a string. Should be set to None when performing a GET or DELETE.
              If data is a file-like object which can be read, this method will
              read a chunk of 100K bytes at a time and send them.
              If the data is a list of parts to be sent, each part will be
              evaluated and sent.
          url: The full URL to which the request should be sent. Can be a string
              or atom.url.Url.
          headers: dict of strings. HTTP headers which should be sent
              in the request.
        """
        all_headers = self.headers.copy()
        if headers:
            all_headers.update(headers)

        # Construct the full payload.
        # Assume that data is None or a string.
        data_str = data
        if data:
            if isinstance(data, list):
                # If data is a list of different objects, convert them all to strings
                # and join them together.
                converted_parts = [__ConvertDataPart(x) for x in data]
                data_str = ''.join(converted_parts)
            else:
                data_str = __ConvertDataPart(data)

        # If the list of headers does not include a Content-Length, attempt to
        # calculate it based on the data object.
        if data and 'Content-Length' not in all_headers:
            all_headers['Content-Length'] = len(data_str)

        # Set the content type to the default value if none was set.
        if 'Content-Type' not in all_headers:
            all_headers['Content-Type'] = 'application/atom+xml'

        # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
        if operation == 'GET':
            method = urlfetch.GET
        elif operation == 'POST':
            method = urlfetch.POST
        elif operation == 'PUT':
            method = urlfetch.PUT
        elif operation == 'DELETE':
            method = urlfetch.DELETE
        else:
            method = None
        return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
                                           method=method, headers=all_headers))
auth_utils.py (source file from project montage, author storyful)
def auth_user(fn):
    """
        Decorator to force user to be logged in with GAE
    """
    @functools.wraps(fn)
    def _wrapped(request, *args, **kwargs):
        temp_request = request
        bearer = request.META['HTTP_AUTHORIZATION']
        url = "https://www.googleapis.com/userinfo/v2/me"
        result = urlfetch.fetch(url=url,
            method=urlfetch.GET,
            headers={"Authorization" : bearer})
        contents = json.loads(result.content)
        gae_user = users.get_current_user()
        is_admin = users.is_current_user_admin()

        User = get_user_model()
        django_user = None
        try:
            logging.debug("Getting django user")
            django_user = User.objects.get(
                email=contents['email'])
        except User.DoesNotExist:
            logging.info("User does not exist in Montage. Checking pending users")
            try:
                pending_user = PendingUser.objects.get(
                    email=contents['email'])
            except PendingUser.DoesNotExist:
                logging.info("No pending user record for this email")
                # The Google userinfo response already carries the address;
                # use it to create the missing Django user.
                user, created = get_user_model().objects.get_or_create(
                    email=contents['email'],
                    defaults={
                        'username': contents['email'].split('@')[0],
                        'is_active': True
                    }
                )
                return user
            else:
                logging.info("Pending user record found. Activating user.")
                django_user = activate_pending_user(
                    pending_user, gae_user, is_admin)
        except AttributeError:
            return HttpResponseForbidden()

        else:
            logging.info("User found. Updating gaia_id and superuser status")
            request = temp_request
            # update_user(django_user, is_admin)

        if django_user:
            request.user = django_user
        else:
            return HttpResponseForbidden()

        return fn(request, *args, **kwargs)
    return _wrapped
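A usage sketch: applying auth_user to a plain Django view (the view and response shape are hypothetical):

from django.http import JsonResponse

@auth_user
def me(request):
    # The decorator has already attached a matching (or newly created) Django user.
    return JsonResponse({'email': request.user.email})
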
tools.py (source file from project slugiot-client, author slugiot)
def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read, db.wiki_page.can_edit]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            pages = db(query).select(count,
                                     *fields, **dict(orderby=orderby or ~count,
                                                     groupby=reduce(lambda a, b: a | b, fields),
                                                     distinct=True,
                                                     limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                                    args=p.wiki_page.slug))),
                             MARKMIN(self.first_paragraph(p.wiki_page))
                                 if preview else '',
                             DIV(_class='w2p_wiki_tags',
                                 *[link(t.strip()) for t in
                                       p.wiki_page.tags or [] if t.strip()]),
                             _class='w2p_wiki_search_item')
                         for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)
remote_api_put_stub.py (source file from project Deploy_XXNET_Server, author jzp820927)
def get_remote_app_id(remote_url, extra_headers=None):
  """Get the app_id from the remote_api endpoint.

  This also has the side effect of verifying that it is a remote_api endpoint.

  Args:
    remote_url: The url to the remote_api handler.
    extra_headers: Headers to send (for authentication).

  Returns:
    app_id: The app_id of the target app.

  Raises:
    FetchFailed: Urlfetch call failed.
    ConfigurationError: URLfetch succeeded but results were invalid.
  """
  rtok = str(random.random())[2:]
  url = remote_url + '?rtok=' + rtok
  if not extra_headers:
    extra_headers = {}
  if 'X-appcfg-api-version' not in extra_headers:
    extra_headers['X-appcfg-api-version'] = '1'
  try:
    urlfetch_response = urlfetch.fetch(url, None, urlfetch.GET,
                                       extra_headers, follow_redirects=False,
                                       deadline=10)
  except Exception, e:


    logging.exception('Fetch failed to %s', remote_url)
    raise FetchFailed('Fetch to %s failed: %r' % (remote_url, e))
  if urlfetch_response.status_code != 200:
    logging.error('Fetch failed to %s; Status %s; body %s',
                  remote_url,
                  urlfetch_response.status_code,
                  urlfetch_response.content)
    raise FetchFailed('Fetch to %s failed with status %s' %
                      (remote_url, urlfetch_response.status_code))
  response = urlfetch_response.content
  if not response.startswith('{'):
    logging.info('Response unparsable: %s', response)
    raise ConfigurationError(
        'Invalid response received from server: %s' % response)
  app_info = yaml.load(response)
  if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
    logging.info('Response unparsable: %s', response)
    raise ConfigurationError('Error parsing app_id lookup response')
  if str(app_info['rtok']) != rtok:
    logging.info('Response invalid token (expected %s): %s', rtok, response)
    raise ConfigurationError('Token validation failed during app_id lookup. '
                             '(sent %s, got %s)' % (repr(rtok),
                                                    repr(app_info['rtok'])))
  return app_info['app_id']
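A usage sketch with a placeholder remote_api URL (Python 2 exception syntax, matching this module):

import logging

remote_url = 'https://your-app-id.appspot.com/_ah/remote_api'  # placeholder
try:
    app_id = get_remote_app_id(remote_url)
except (FetchFailed, ConfigurationError), e:
    logging.error('remote_api handshake failed: %r', e)
else:
    logging.info('Talking to app %s', app_id)
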
urlfetch.py (source file from project GAMADV-X, author taers232c)
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [__ConvertDataPart(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers))
handlersoauth.py (source file from project enkiWS, author juliettef)
def auth_callback_provider( self ):
        # STEP 3
        oauth_verifier = self.request.get( 'oauth_verifier' )
        params = [( 'oauth_consumer_key' , settings.secrets.CLIENT_ID_TWITTER ),
                  ( 'oauth_nonce' , webapp2_extras.security.generate_random_string( length = 42, pool = webapp2_extras.security.ALPHANUMERIC ).encode( 'utf-8' )),
                  ( 'oauth_signature_method' , "HMAC-SHA1" ),
                  ( 'oauth_timestamp' , str( int( time.time()))),
                  ( 'oauth_token', self.session.get( 'twitter_oauth_token' )),
                  ( 'oauth_version' , "1.0" )]
        normalised_url = 'https://api.twitter.com/oauth/access_token/'
        oauth_signature = self.auth_sign( normalised_url, params, self.session.get( 'twitter_oauth_token_secret') )
        params.append(( 'oauth_signature', oauth_signature ))
        params.append(( 'oauth_verifier', oauth_verifier ))
        url_params = enki.libutil.urlencode( params )
        result = self.urlfetch_safe( url = normalised_url, payload = url_params, method = urlfetch.POST )
        response = self.process_result_as_query_string( result )
        oauth_token = response.get( 'oauth_token' )
        oauth_token_secret = response.get('oauth_token_secret')
        user_id = response.get( 'user_id')
        if user_id and oauth_token:
            #get email address if we can
            verify_params = [('include_email', 'true'),
                             ('include_entities','false'),
                             ('oauth_consumer_key', settings.secrets.CLIENT_ID_TWITTER ),
                             ('oauth_nonce', webapp2_extras.security.generate_random_string( length = 42, pool = webapp2_extras.security.ALPHANUMERIC ).encode( 'utf-8' )),
                             ('oauth_signature_method', "HMAC-SHA1"),
                             ('oauth_timestamp', str(int(time.time()))),
                             ('oauth_token', oauth_token ),
                             ('oauth_version', "1.0"),
                             ('skip_status', 'true')]
            verify_oauth_signature = self.auth_sign('https://api.twitter.com/1.1/account/verify_credentials.json', verify_params,oauth_token_secret, method_get=True )
            verify_params.append(('oauth_signature', verify_oauth_signature))
            verify_url_params = enki.libutil.urlencode( verify_params )
            full_url = 'https://api.twitter.com/1.1/account/verify_credentials.json?' + verify_url_params
            verify_credentials_result_json = self.urlfetch_safe( url = full_url, method = urlfetch.GET )
            verify_credentials_result = self.process_result_as_JSON(verify_credentials_result_json)
            response['email'] = verify_credentials_result['email']
            response['email_verified'] = True
            loginInfoSettings = { 'provider_uid': 'user_id',
                                  'email': 'email',
                                  'email_verified': 'email_verified' }
            loginInfo = self.process_login_info( loginInfoSettings, response )
            self.provider_authenticated_callback( loginInfo )
        else:
            self.abort( 401 )
        return
goodreads.py (source file from project flow-dashboard, author onejgordon)
def get_books_on_shelf(user, shelf='currently-reading'):
    '''
    Return JSON array {title, author, isbn, image}
    '''
    user_id = user.get_integration_prop('goodreads_user_id')
    readables = []
    success = False
    if user_id:
        data = urllib.urlencode({
            'shelf': shelf,
            'key': GR_API_KEY,
            'v': 2
        })
        params = data
        url = "https://www.goodreads.com/review/list/%s.xml?%s" % (user_id, params)
        logging.debug("Fetching %s for %s" % (url, user))
        res = urlfetch.fetch(
            url=url,
            method=urlfetch.GET,
            validate_certificate=True)
        logging.debug(res.status_code)
        if res.status_code == 200:
            xml = res.content
            data = etree.parse(StringIO(xml))
            for r in data.getroot().find('reviews').findall('review'):
                book = r.find('book')
                isbn = book.find('isbn13').text
                image_url = book.find('image_url').text
                title = book.find('title').text
                authors = book.find('authors')
                link = book.find('link').text
                author = None  # guard against a NameError when the book has no author element
                first_author = authors.find('author')
                if first_author is not None:
                    name = first_author.find('name')
                    if name is not None:
                        author = name.text
                r = Readable.CreateOrUpdate(user, isbn, title=title,
                                            url=link, source='goodreads',
                                            image_url=image_url, author=author,
                                            type=READABLE.BOOK,
                                            read=False)
                readables.append(r)
            success = True
        logging.debug("Putting %d readable(s)" % len(readables))
        ndb.put_multi(readables)
        Readable.put_sd_batch(readables)
    return (success, readables)
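A usage sketch, assuming user is an entity whose goodreads_user_id integration property has been set:

success, readables = get_books_on_shelf(user, shelf='currently-reading')
if success:
    titles = [r.title for r in readables]  # assumes Readable exposes a title attribute
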
tools.py (source file from project StuffShare, author StuffShare)
def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            pages = db(query).select(count,
                *fields, **dict(orderby=orderby or ~count,
                               groupby=reduce(lambda a, b: a | b, fields),
                               distinct=True,
                               limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                                    args=p.wiki_page.slug))),
                             MARKMIN(self.first_paragraph(p.wiki_page))
                                 if preview else '',
                             DIV(_class='w2p_wiki_tags',
                                 *[link(t.strip()) for t in
                                       p.wiki_page.tags or [] if t.strip()]),
                             _class='w2p_wiki_search_item')
                         for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)

