Source code examples for Python's URLError class
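
The snippets below are collected from open-source projects and show how URLError is handled in practice. In Python 2 the exception lives in urllib2; in Python 3 it moved to urllib.error. HTTPError is a subclass of URLError, so handlers that need the HTTP status code must catch it before the more general URLError. A minimal sketch of the common pattern (Python 2, with example.com standing in for a real endpoint):

import urllib2

try:
    response = urllib2.urlopen('http://example.com', timeout=10)
    body = response.read()
except urllib2.HTTPError as e:
    # the server answered, but with an error status; e carries a code and a body
    print 'HTTP error:', e.code
except urllib2.URLError as e:
    # connection-level failure (DNS, refused connection, timeout); only e.reason is set
    print 'connection failed:', e.reason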

rest.py (project: Cortex-Analyzers, author: CERT-BDF)
def get(self, url, proxy=None):
        if proxy:
            proxy = urllib2.ProxyHandler({'http': proxy})
            opener = urllib2.build_opener(proxy)
            urllib2.install_opener(opener)

        try:
            response = urllib2.urlopen(url)
        except HTTPError, e:
            resp = e.read()
            self.status_code = e.code
        except URLError, e:
            # URLError (unlike HTTPError) has no read() or code; keep the reason
            resp = str(e.reason)
            self.status_code = None
        else:
            self.status_code = response.code
            resp = response.read()

        return resp
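
One detail worth noting in get(): urllib2.install_opener() installs the proxy-aware opener globally, so every later urllib2.urlopen() call in the process is routed through the proxy as well. A sketch that scopes the proxy to a single request instead (proxy address hypothetical):

import urllib2

opener = urllib2.build_opener(urllib2.ProxyHandler({'http': 'http://10.0.0.1:8080'}))
response = opener.open('http://example.com')
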
hippo.py (project: Cortex-Analyzers, author: CERT-BDF)
def run(self):
        data = self.getData()

        value = {
            data: {
                "type": self.data_type
            }
        }
        json_data = json.dumps(value)
        post_data = json_data.encode('utf-8')
        headers = {'Content-Type': 'application/json'}

        try:
            request = urllib2.Request('{}/hippocampe/api/v1.0/{}'.format(self.url, self.service), post_data, headers)
            response = urllib2.urlopen(request)
            report = json.loads(response.read())

            self.report(report)
        except urllib2.HTTPError:
            self.error("Hippocampe: " + str(sys.exc_info()[1]))
        except urllib2.URLError:
            self.error("Hippocampe: service is not available")
        except Exception as e:
            self.unexpectedError(e)
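
Note the ordering of the handlers above: urllib2.HTTPError derives from urllib2.URLError, so the more specific clause has to come first; with the order reversed, the URLError clause would swallow HTTP errors as well. A minimal sketch of the pitfall (example.com is a placeholder):

import urllib2

try:
    urllib2.urlopen('http://example.com/missing')
except urllib2.URLError as e:
    print 'caught here, even for plain HTTP errors:', e
except urllib2.HTTPError as e:
    print 'never reached: HTTPError is a subclass of URLError'
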
filesDownloader.py (project: Crawl-And-Download, author: AsciiKay)
import os
import random
import urllib2

def downloadFilesSave(links, fileFormat):    # main function
    if (links == 'EMPTY'):    # if links list is empty
        return '  NO LINKS FOUND !'

    for link in links:
        name = random.randint(0, 10000001)
        while (str(name) + '.' + fileFormat) in os.listdir(os.getcwd()):    # re-roll until the name is unused
            name = random.randint(0, 10000001)

        if (fileFormat not in ['zip', 'png', 'jpg', 'jpeg', 'tiff', 'bmp', 'svg', 'gif']):
            try:
                saveFile=open(str(name)+'.' + fileFormat, 'w')
                saveFile.write(urllib2.urlopen(link).read())
                saveFile.close()
            except urllib2.URLError:
                pass

        else:
            try:
                saveFile=open(str(name)+'.' + fileFormat, 'wb')
                saveFile.write(urllib2.urlopen(link).read())
                saveFile.close()
            except urllib2.URLError:
                pass
    return ' {} DOWNLOADS SUCCESSFUL SO FAR!'.format(len(os.listdir(os.getcwd())))
tools.py (project: mongoaudit, author: Exploit-install)
def send_result(email, result, title, urn):
    """
    Args:
        email (str): address to send the results
        result (obj): results to send
        title (str):
        urn (str): uniform resource name
    Returns:
        str: response from endpoint
    """
    url = 'https://mongoaud.it/results'
    headers = {'Content-type': 'application/json',
               'Accept': 'application/json'}
    values = {'email': email, 'result': result, 'title': title, 'urn': urn, 'date': get_date()}
    try:
        req = urllib2.Request(url, json.dumps(values), headers)
        response = urllib2.urlopen(req)
        return response.read()
    except (urllib2.HTTPError, urllib2.URLError) as exc:
        return "Sadly enough, we are having technical difficulties at the moment, " \
               "please try again later.\n\n%s" % str(exc)
tools.py (project: mongoaudit, author: Exploit-install)
def check_version(version):
    # if application is binary then check for latest version
    if getattr(sys, 'frozen', False):
        try:
            url = "https://api.github.com/repos/stampery/mongoaudit/releases/latest"
            req = urllib2.urlopen(url)
            releases = json.loads(req.read())
            latest = releases["tag_name"]
            if version < latest:
                print("mongoaudit version " + version)
                print("There's a new version " + latest)
                _upgrade(releases)

        except (urllib2.HTTPError, urllib2.URLError):
            print("Couldn't check for upgrades")
        except os.error:
            print("Couldn't write mongoaudit binary")
wpforce.py (project: WPForce, author: n00py)
def TestSite(url):
    protocheck(url)
    print "Trying: " + url
    try:
        urllib2.urlopen(url, timeout=3)
    except urllib2.HTTPError, e:
        if e.code == 405:
            print url + " found!"
            print "Now the brute force will begin!  >:)"
        if e.code == 404:
            printout(str(e), YELLOW)
            print " - XMLRPC has been moved, removed, or blocked"
            sys.exit()
    except urllib2.URLError, g:
        printout("Could not identify XMLRPC.  Please verify the domain.\n", YELLOW)
        sys.exit()
    except socket.timeout as e:
        print type(e)
        printout("The socket timed out, try it again.", YELLOW)
        sys.exit()
distnet.py (project: SoCFoundationFlow, author: mattaw)
def compute_dependencies(self, filename=REQUIRES):
        text = Utils.readf(filename)
        data = safe_urlencode([('text', text)])

        if '--offline' in sys.argv:
            self.constraints = self.local_resolve(text)
        else:
            req = Request(get_resolve_url(), data)
            try:
                response = urlopen(req, timeout=TIMEOUT)
            except URLError as e:
                Logs.warn('The package server is down! %r' % e)
                self.constraints = self.local_resolve(text)
            else:
                ret = response.read()
                try:
                    ret = ret.decode('utf-8')
                except Exception:
                    pass
                self.trace(ret)
                self.constraints = parse_constraints(ret)
        self.check_errors()
gather.py (project: SPF, author: Exploit-install)
def search(self, url, offset=1, maxoffset=0, title=""):
        current_offset = 0
        data = ""
        self.p.reset(title=title)
        while current_offset <= maxoffset:
            self.p.rotate()
            temp_url = re.sub(r'\[\[OFFSET\]\]', str(current_offset), url)
            try:
                headers = { 'User-Agent' : self.user_agent }
                req = urllib2.Request(temp_url, None, headers)
                data += urllib2.urlopen(req).read()
            except urllib2.URLError as e:
                self.display.error("Could not access [%s]" % (title))
                return data
            except Exception as e:
                print e
            current_offset += offset
        self.p.done()
        return data
springenwerk.py (project: darkc0de-old-stuff, author: tuwid)
def getURLContents(self, url, data=None):    
        "Returns the contents of the given URL as an Unicode string" 

        s = ""
        success = False

        req = Request(url, data, {'User-agent': self.useragent})

        try:
            f = urlopen(req)  
            s = f.read()
            f.close()
            success = True
        except HTTPError, e:
            print 'Server error: ', e.code
            if (self.verbose and BaseHTTPRequestHandler.responses.has_key(e.code)):
                title, msg = BaseHTTPRequestHandler.responses[e.code]            
                print title + ": " + msg
        except URLError, e:
            print 'Connection error: ', e.reason

        dammit = UnicodeDammit(s)    

        return (success, dammit.unicode)
google.py (project: darkc0de-old-stuff, author: tuwid)
def getCookie(self):
        """
        This method is the first to be called when initializing a
        Google dorking object through this library. It is used to
        retrieve the Google session cookie needed to perform the
        further search
        """

        try:
            conn = self.opener.open("http://www.google.com/ncr")
            headers = conn.info()
        except urllib2.HTTPError, e:
            headers = e.info()
        except urllib2.URLError, e:
            errMsg = "unable to connect to Google"
            raise sqlmapConnectionException, errMsg
couchbase.py (project: collectd-couchbase, author: signalfx)
def _api_call(url, opener):
    """
    Makes a REST call against the Couchbase API.
    Args:
    url (str): The URL to get, including endpoint
    Returns:
    list: The JSON response
    """
    try:
        urllib2.install_opener(opener)
        resp = urllib2.urlopen(url, timeout=http_timeout)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        collectd.error("Error making API call (%s) %s" % (e, url))
        return None
    try:
        return json.load(resp)
    except ValueError, e:
        collectd.error("Error parsing JSON for API call (%s) %s" % (e, url))
        return None
woxikon_de_lookup.py (project: thesaurus_query.vim, author: Ron89)
def _woxikon_de_url_handler(target):
    '''
    Query woxikon for synonyms
    '''
    time_out_choice = float(get_variable(
        'tq_online_backends_timeout', _timeout_period_default))
    try:
        response = urlopen(fixurl(u'http://synonyms.woxikon.com/de/{0}'.format(target)).decode('ASCII'), timeout = time_out_choice)
        web_content = StringIO(unescape(decode_utf_8(response.read())))
        response.close()
    except HTTPError:
        return 1
    except URLError as err:
        if isinstance(err.reason, socket.timeout):  # timeout error?
            return 1
        return -1   # other error
    except socket.timeout:  # timeout error failed to be captured by URLError
        return 1
    return web_content
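
A timeout usually surfaces as a URLError whose reason attribute is the underlying socket.timeout instance, but the raw socket.timeout can also escape unwrapped, which is why the function above checks for both. A minimal sketch of the same distinction (the URL and the very short timeout are only there to force the error):

import socket
import urllib2

try:
    urllib2.urlopen('http://example.com/', timeout=0.01)
except urllib2.URLError as err:
    if isinstance(err.reason, socket.timeout):
        print 'timed out'
    else:
        print 'other network error:', err.reason
except socket.timeout:
    print 'timed out (raised without the URLError wrapper)'
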
jeck_ru_lookup.py (project: thesaurus_query.vim, author: Ron89)
def _jeck_ru_url_handler(target):
    '''
    Query jeck.ru for synonyms
    '''
    time_out_choice = float(get_variable(
        'tq_online_backends_timeout', _timeout_period_default))
    try:
        response = urlopen(fixurl(u'http://jeck.ru/tools/SynonymsDictionary/{0}'.format(target)).decode('ASCII'), timeout = time_out_choice)
        web_content = StringIO(decode_utf_8(response.read()))
        response.close()
    except HTTPError:
        return 1
    except URLError as err:
        if isinstance(err.reason, socket.timeout):  # timeout error?
            return 1
        return -1   # any other error
    except socket.timeout:  # if timeout error not captured by URLError
        return 1
    return web_content
suneo-bot.py (project: Shodita, author: Quantika14)
def get_target():
    global client, db
    cursor = db.Shodita.find({"bot":"Shizuka"})
    for document in cursor:
        if check_domain_mongodb(document["ip"], document["dominio"]):
            print colores.verde + "[INFO] Domain: " + document["dominio"] + " already scanned" + colores.normal
            pass
        else:
            url = "http://" + document["dominio"]
            headers = {'User-Agent' : 'Mozilla 5.10'}
            request = Request(url, None, headers)
            try:
                response = urlopen(request, timeout=10)
                if response.code == 200 or response.code == "OK":
                    html = response.read()
                    if detect_wp(html, document["dominio"]) == True:
                        insert_mongodb("WordPress", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is WordPress" + colores.normal
                    if detect_joomla(html):
                        insert_mongodb("Joomla", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is Joomla" + colores.normal
                    if detect_drupal(html):
                        insert_mongodb("Drupal", document["dominio"], document["ip"])
                        print colores.verde + "[+][INFO] " + document["dominio"] + " is Drupal" + colores.normal
            except URLError, e:
                continue
            except httplib.BadStatusLine:
                continue
            except:
                continue
magicmirrorplatform.py (project: AlexaPi, author: alexa-pi)
def mm_heartbeat(self):
        # Check if stop or set next timer
        if self.shutdown:
            return
        threading.Timer(self.hb_timer, self.mm_heartbeat).start()

        address = ("http://" + self.mm_host + ":" + self.mm_port + "/alexapi?action=AVSHB")

        logger.debug("Sending MM Heatbeat")

        try:
            response = urlopen(address).read()
        except URLError as err:
            logger.error("URLError: %s", err.reason)
            return

        logger.debug("Response: " + response)
service.py (project: devsecops-example-helloworld, author: boozallen)
def send_remote_shutdown_command(self):
        try:
            from urllib import request as url_request
            URLError = url_request.URLError
        except ImportError:
            import urllib2 as url_request
            URLError = url_request.URLError

        try:
            url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
        except URLError:
            return
        count = 0
        while self.is_connectable():
            if count == 30:
                break
            count += 1
            time.sleep(1)
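
The import fallback at the top keeps this method working on both Python 3 and Python 2. On Python 3 the exception is defined in urllib.error and re-exported by urllib.request, which is what the first branch relies on; targeting Python 3 alone, the equivalent shrinks to (port number illustrative):

from urllib.error import URLError
from urllib.request import urlopen

try:
    urlopen('http://127.0.0.1:4444/shutdown')
except URLError:
    pass  # the server is already gone
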
utils.py (project: plugin.video.auvio, author: rickybiscus)
def request_url(url, referer='http://www.google.com'):
    common.plugin.log('request_url : %s' % url)
    req = urllib2.Request(url)
    req.add_header('Referer', referer)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.3) Gecko/20100101 Firefox/11.0 ( .NET CLR 3.5.30729)')

    try:
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
    except urllib2.URLError as e:
        common.plugin.log_error("Remote request error for URL %s: %r" % (url,e))
        return
    except socket.timeout, e:
        common.plugin.log_error("Remote request error for URL %s: %r" % (url,e))
        return

    return data
basic_auth.py (project: touch-pay-client, author: HackPucBemobi)
import base64
import urllib2

def basic_auth(server="http://127.0.0.1"):
    """
    to use basic login with a different server
    from gluon.contrib.login_methods.basic_auth import basic_auth
    auth.settings.login_methods.append(basic_auth('http://server'))
    """

    def basic_login_aux(username,
                        password,
                        server=server):
        key = base64.b64encode(username + ':' + password)
        headers = {'Authorization': 'Basic ' + key}
        request = urllib2.Request(server, None, headers)
        try:
            urllib2.urlopen(request)
            return True
        except (urllib2.URLError, urllib2.HTTPError):
            return False
    return basic_login_aux
multitester.py (project: TCP-IP, author: JackZ0)
import sys
import time
import urllib2

def block_until_http_ready(urlstring, wait_time=10, timeout=240):
    "Blocks until server at urlstring can respond to http requests"
    server_ready = False
    t_elapsed = 0
    while not server_ready and t_elapsed < timeout:
        try:
            sys.stdout.write('.')
            sys.stdout.flush()
            req = urllib2.Request(urlstring)
            response = urllib2.urlopen(req)
            #if response.code == 200:
            server_ready = True
        except urllib2.URLError:
            pass
        time.sleep(wait_time)
        t_elapsed += wait_time
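
A typical call, assuming a hypothetical service being brought up on port 8080, polls every ten seconds for up to four minutes:

block_until_http_ready('http://127.0.0.1:8080', wait_time=10, timeout=240)
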
job.py (project: dataScryer, author: Griesbacher)
def run(self):
        if len(self.__update_rates) == 0:
            return

        # wait up to 120 seconds, to get some distortion
        self.__stop_event.wait(randint(0, 120))

        while not self.__stop_event.is_set():
            start = time.time()
            for update in self.__update_rates:
                rate = update[0]
                now = time.time()
                time_to_wait = round(start - now + rate / 1000, 0)
                interrupt = self.__stop_event.wait(time_to_wait)
                if interrupt:
                    return
                try:
                    self.start_calculation(update[1])
                except URLError as e:
                    logging.getLogger(__name__).error("Could not connect to InfluxDB: " + str(e))
                except:
                    logging.getLogger(__name__).error("Job execution failed", exc_info=True)
github_importer.py (project: CrowdAnki, author: Stvad)
def download_and_import(self, repo):
        try:
            response = urllib2.urlopen(GITHUB_LINK.format(repo))
            response_sio = StringIO.StringIO(response.read())
            with zipfile.ZipFile(response_sio) as repo_zip:
                repo_zip.extractall(tempfile.tempdir)

            deck_base_name = repo.split("/")[-1]
            deck_directory_wb = Path(tempfile.tempdir).joinpath(deck_base_name + "-" + BRANCH_NAME)
            deck_directory = Path(tempfile.tempdir).joinpath(deck_base_name)
            utils.fs_remove(deck_directory)
            deck_directory_wb.rename(deck_directory)
            # Todo progressbar on download

            AnkiJsonImporter.import_deck(self.collection, deck_directory)

        except (urllib2.URLError, urllib2.HTTPError, OSError) as error:
            aqt.utils.showWarning("Error while trying to get deck from Github: {}".format(error))
            raise
run.py (project: electron-crash-reporter, author: lipis)
def check_for_update():
  if os.path.exists(FILE_UPDATE):
    mtime = os.path.getmtime(FILE_UPDATE)
    last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
    today = datetime.utcnow().strftime('%Y-%m-%d')
    if last == today:
      return
  try:
    with open(FILE_UPDATE, 'a'):
      os.utime(FILE_UPDATE, None)
    request = urllib2.Request(
      CORE_VERSION_URL,
      urllib.urlencode({'version': main.__version__}),
    )
    response = urllib2.urlopen(request)
    with open(FILE_UPDATE, 'w') as update_json:
      update_json.write(response.read())
  except (urllib2.HTTPError, urllib2.URLError):
    pass
browser.py (project: doork, author: AeonDave)
def get_page(self, url, data=None):
        handlers = [PoolHTTPHandler]
        opener = urllib2.build_opener(*handlers)
        if data: data = urllib.urlencode(data)
        request = urllib2.Request(url, data, self.headers)
        try:
            response = opener.open(request)
            return response.read()
        except (urllib2.HTTPError, urllib2.URLError), e:
            raise BrowserError(url, str(e))
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(url, msg)
        except socket.timeout, e:
            raise BrowserError(url, "timeout")
        except KeyboardInterrupt:
            raise
        except:
            raise BrowserError(url, "unknown error")
home.py (project: aerospike-telemetry-agent, author: aerospike)
def postInfo(self, requestParams):
        logging.info("About to phone home to [%s].", self.url)

        req = urllib2.Request(self.url)
        req.add_header('Content-Type', 'application/json')
        resp = None

        try:
            resp = urllib2.urlopen(req, json.dumps(requestParams), timeout = 30, **self.kwargs)
            resp = resp.read()
        except urllib2.HTTPError, e:
            logging.error("HTTPError: %s", str(e.code))
        except urllib2.URLError, e:
            logging.error("URLError: %s", str(e.reason))
        except httplib.HTTPException, e:
            logging.error("HTTPException: %s", str(e))
        except Exception, e:
            logging.exception("Unexpected error: %s", str(e))

        return resp
link_crawler.py (project: WebScraping, author: liinnux)
def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                html = download(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return html
link_crawler3.py (project: WebScraping, author: liinnux)
def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                return download(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return html
common.py (project: WebScraping, author: liinnux)
import urllib2
import urlparse

def download5(url, user_agent='wswp', proxy=None, num_retries=2):
    """Download function with support for proxies"""
    print 'Downloading:', url
    headers = {'User-agent': user_agent}
    request = urllib2.Request(url, headers=headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        html = opener.open(request).read()
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download5(url, user_agent, proxy, num_retries-1)
    return html
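
Called as below (the URL is a placeholder), the function retries server-side 5XX failures up to two more times and returns None if the request keeps failing:

html = download5('http://example.com', user_agent='wswp', num_retries=2)
if html is None:
    print 'download failed'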

