def title_from_youtube(bot, url):
    """Return the title of the YouTube video at `url`, or "" on any failure.

    Failures (missing API key, unrecognized URL, empty/errored API
    response) are logged and reported as an empty string.
    """
    try:
        youtube_api_key = bot.config.get_by_path(["spotify", "youtube"])
        youtube_client = build("youtube", "v3", developerKey=youtube_api_key)
    except (KeyError, TypeError) as e:
        logger.error("<b>YouTube API key isn't configured:</b> {}".format(e))
        return ""
    # Regex by mantish from http://stackoverflow.com/a/9102270 to get the
    # video id from a YouTube URL. Fix: the dot in "youtu.be" is escaped so
    # it no longer matches arbitrary hosts like "youtuXbe".
    match = re.match(
        r"^.*(youtu\.be\/|v\/|u\/\w\/|embed\/|watch\?v=|\&v=)([^#\&\?]*).*",
        url)
    # Valid YouTube video ids are exactly 11 characters long.
    if match and len(match.group(2)) == 11:
        video_id = match.group(2)
    else:
        logger.error("Unable to extract video id: {}".format(url))
        return ""
    # YouTube response is JSON.
    try:
        response = youtube_client.videos().list(  # pylint: disable=no-member
            part="snippet", id=video_id).execute()
        items = response.get("items", [])
        if items:
            return items[0]["snippet"]["title"]
        logger.error("<b>YouTube response was empty:</b> {}".format(response))
        return ""
    except YouTubeHTTPError as e:
        logger.error("Unable to get video entry from {}, {}".format(url, e))
        return ""
# Collected example source code for Python uses of build()
def create_service(self, host):
    """Build an authenticated Kintaro content-service client for `host`."""
    credentials = oauth.get_or_create_credentials(
        scope=OAUTH_SCOPES, storage_key=STORAGE_KEY)
    http = credentials.authorize(
        httplib2.Http(ca_certs=utils.get_cacerts_path()))
    # Kintaro's server doesn't seem to be able to refresh expired tokens
    # properly (responds with a "Stateless token expired" error), so we
    # manage state ourselves and refresh slightly more often than hourly.
    now = datetime.datetime.now()
    needs_refresh = (
        self._last_run is None
        or now - self._last_run >= datetime.timedelta(minutes=50))
    if needs_refresh:
        credentials.refresh(http)
        self._last_run = now
    return discovery.build(
        'content', 'v1', http=http,
        discoveryServiceUrl=DISCOVERY_URL.replace('{host}', host))
def tear_down_gce_cluster(conf):
    """Delete every node VM listed in `conf` and wait for each deletion."""
    credentials = GoogleCredentials.get_application_default()
    gce = discovery.build("compute", "v1", credentials=credentials)
    # Kick off all deletions first, then poll each zone operation to DONE.
    pending_ops = []
    for node in conf["nodes"]:
        print("Deleting node on virtual machine {}...".format(node["vmID"]))
        pending_ops.append(delete_instance(gce, node["vmID"]))
    for op in pending_ops:
        while True:
            result = gce.zoneOperations().get(
                project=GCP_PROJECT_ID, zone=GCE_ZONE_ID,
                operation=op["name"]).execute()
            if result["status"] == "DONE":
                # if "error" in result: raise Exception(result["error"]) # TODO handle error
                vm_name = result["targetLink"].split("/")[-1]
                print("Deleted node on virtual machine {}".format(vm_name))
                break
            sleep(1)
    print("Cluster torn down correctly. Bye!")
def get(self, destination):
    """Fetch one named GKE cluster, or every cluster in the zone/project."""
    service = build('container', 'v1')
    clusters_api = service.projects().zones().clusters()
    self._zone = self._data.get('zone')
    self._project = self._data.get('project')
    self._destination = destination
    cluster_name = self._data.get('name')
    if not cluster_name:
        # No specific name requested: enumerate everything in the zone.
        listing = clusters_api.list(
            projectId=self._project, zone=self._zone).execute()
        for cluster in listing['clusters']:
            Cluster(self.parse_cluster(cluster)).add(cluster['name'])
            self.add_cluster_nodepools(
                cluster.get('name'), cluster.get('nodePools'),
                self._data.get('get_default_nodepools'))
    else:
        cluster = clusters_api.get(
            projectId=self._project, zone=self._zone,
            clusterId=cluster_name).execute()
        Cluster(self.parse_cluster(cluster)).add(cluster_name)
        self.add_cluster_nodepools(cluster_name, cluster.get('nodePools'))
def find(self, item):
    """Query Google Custom Search for a price for `item`.

    Returns (original_description, price, currency) on success; on any
    failure the error is logged and None is returned implicitly.
    """
    try:
        log.info('search for {item} through {class_name}'.format(
            item=item, class_name=__name__))
        search = build("customsearch", "v1",
                       developerKey=google_developer_key)
        # https://developers.google.com/custom-search/json-api/v1/reference/cse/list#response
        raw_response = search.cse().list(
            q='how much is the ' + item,
            cx=google_custom_search_engine_key,
        ).execute()
        #log.debug('RESPONSE = {response}'.format(pprint.pprint(response)))
        original_description, price, currency = parse_response(raw_response)
        return original_description, price, currency
    #TODO identify proper Exception to expect
    except Exception as e:
        log.error(e)
def authorize(self):
"""
Connect with api and build youtube service
"""
print 'Authorizing...'
if self.youtube:
print 'Already authorized'
return False
self.youtube = build(self.YOUTUBE_API_SERVICE_NAME,
self.YOUTUBE_API_VERSION,
developerKey=self.DEVELOPER_KEY)
#
# Returns a boolean:
# True if the video should be skipped (i.e. it is not a candidate).
#
def insert_entity(projectId, product, categories, table_name, version="v1",
                  prefix="", items="items"):
    """Page through a discovery API list() call and store each row in TinyDB."""
    db = TinyDB("project_dbs/" + projectId + ".json")
    service = discovery.build(product, version, credentials=storage.get())
    # Descend the nested resource path, e.g. ['instances'] ->
    # service.instances(); the last level is the collection we list.
    while categories:
        api_entity = getattr(service, categories.pop(0))()
        service = api_entity
    request = api_entity.list(project=prefix + projectId)
    try:
        while request is not None:
            response = request.execute()
            for item in response[items]:
                db.table(table_name).insert(item)
            try:
                request = api_entity.list_next(
                    previous_request=request, previous_response=response)
            except AttributeError:
                # This resource has no list_next(): single page only.
                request = None
    except KeyError:
        # Response carried no `items` key: nothing to store.
        pass
def list_projects(project_or_org, specifier):
    """Insert all live (non-deleted) projects matching `specifier` into the db."""
    service = discovery.build('cloudresourcemanager', 'v1',
                              credentials=storage.get())
    if project_or_org == "organization":
        request = service.projects().list(filter='parent.id:%s' % specifier)
    elif project_or_org == "project":
        request = service.projects().list(filter='name:%s' % specifier)
    else:
        raise Exception('Organization or Project not specified.')
    while request is not None:
        response = request.execute()
        for project in response['projects']:
            # Skip projects that are pending deletion.
            if project['lifecycleState'] != "DELETE_REQUESTED":
                db.table('Project').insert(project)
        request = service.projects().list_next(
            previous_request=request, previous_response=response)
def main():
takephoto() # First take a picture
"""Run a label request on a single image"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open('image.jpg', 'rb') as image:
image_content = base64.b64encode(image.read())
service_request = service.images().annotate(body={
'requests': [{
'image': {
'content': image_content.decode('UTF-8')
},
'features': [{
'type': 'FACE_DETECTION',
'maxResults': 10
}]
}]
})
response = service_request.execute()
print json.dumps(response, indent=4, sort_keys=True) #Print it out and make it somewhat pretty.
def main():
takephoto() # First take a picture
"""Run a label request on a single image"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open('image.jpg', 'rb') as image:
image_content = base64.b64encode(image.read())
service_request = service.images().annotate(body={
'requests': [{
'image': {
'content': image_content.decode('UTF-8')
},
'features': [{
'type': 'LABEL_DETECTION',
'maxResults': 10
}]
}]
})
response = service_request.execute()
print json.dumps(response, indent=4, sort_keys=True) #Print it out and make it somewhat pretty.
def youtube_search(keyword, page_token, max_results=10):
    """
    Search YouTube for `keyword` and return the raw API response dict.

    (The original docstring was mojibake-encoded non-English text
    describing how pageToken flows between the view, this helper, and
    the template; rewritten here from the code itself.)

    Args:
        keyword: search query string passed as `q`.
        page_token: YouTube results-page token (may be None/"" for the
            first page) forwarded as `pageToken`.
        max_results: maximum number of results per page.
    """
    youtube = build(
        YOUTUBE_API_SERVICE_NAME,
        YOUTUBE_API_VERSION,
        developerKey=DEVELOPER_KEY
    )
    search_response = youtube.search().list(
        q=keyword,
        part="id,snippet",
        maxResults=max_results,
        pageToken=page_token
    ).execute()
    return search_response
def wait_for_operation(self, zone, op_response, retry_count=MAX_API_RETRY_COUNT):
    """Poll a GCE zone operation until it is DONE (or abort is requested).

    Returns the final operation status string, or None when op_response
    is None or all retries are exhausted. On any exception the call
    retries itself recursively up to `retry_count` times.
    """
    try:
        # Build the compute client once; the original rebuilt it on every
        # poll iteration even though it is loop-invariant.
        compute = discovery.build(
            API_TYPE, API_VERSION, credentials=self.config.credentials)
        while op_response is not None:  # dropped redundant `True and`
            result = compute.zoneOperations().get(
                project=self.config.PROJECT_ID, zone=zone,
                operation=op_response['name']).execute()
            if result['status'] == 'DONE' or self.abort_all:
                return result['status']
            time.sleep(1)
    except Exception as exception:  # py2/py3-compatible except syntax
        if retry_count > 0 and not self.abort_all:
            self._log(API_RETRY_MESSAGE
                      % (sys._getframe().f_code.co_name, exception))
            return self.wait_for_operation(zone, op_response, (retry_count - 1))
        else:
            self._log(API_MAX_RETRY_NESSAGE
                      % (sys._getframe().f_code.co_name,
                         MAX_API_RETRY_COUNT, exception))
def get_drive_service():
    '''
    Returns an object used to interact with the Google Drive API,
    running the interactive OAuth flow when no valid stored
    credentials exist.
    '''
    flow = client.flow_from_clientsecrets(
        get_credentials_path('secret.json'),
        'https://www.googleapis.com/auth/drive')
    flow.user_agent = USER_AGENT_NAME
    store = Storage(get_credentials_path('storage.dat', False))
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No usable credentials: prompt the user via the OAuth flow.
        flags = tools.argparser.parse_args(args=[])
        credentials = tools.run_flow(flow, store, flags)
    authorized_http = credentials.authorize(httplib2.Http())
    return discovery.build('drive', 'v3', http=authorized_http)
def __init__(self):
    """
    Generate an authorized YouTube API client and an S3 client.
    """
    # Positional args follow oauth2client's GoogleCredentials signature;
    # the two Nones are the token expiry and user agent — TODO confirm.
    credentials = oauth2client.client.GoogleCredentials(
        settings.YT_ACCESS_TOKEN,
        settings.YT_CLIENT_ID,
        settings.YT_CLIENT_SECRET,
        settings.YT_REFRESH_TOKEN,
        None,
        'https://accounts.google.com/o/oauth2/token',
        None)
    authorized_http = credentials.authorize(httplib2.Http())
    # Refresh up front so the access token is valid from the start.
    credentials.refresh(authorized_http)
    self.client = build('youtube', 'v3', credentials=credentials)
    self.s3 = boto3.client('s3')
def _get_bigquery_service(self):
    """
    Connect to the BigQuery service.
    Calling ``GoogleCredentials.get_application_default`` requires that
    you either be running in the Google Cloud, or have the
    ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the path
    to a credentials JSON file.
    :return: authenticated BigQuery service connection object
    :rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
    google-api-python-client/docs/epy/googleapiclient.discovery.\
    Resource-class.html>`_
    """
    logger.debug('Getting Google Credentials')
    creds = GoogleCredentials.get_application_default()
    logger.debug('Building BigQuery service instance')
    return build('bigquery', 'v2', credentials=creds)
def get_google_service(service_type=None, version=None):
    '''
    Build an authenticated Google API client using application-default
    credentials.
    :param service_type: the service to get (default is "storage")
    :param version: version to use (default is "v1")
    '''
    # `is None` instead of `== None`: identity check is the correct idiom
    # and avoids invoking a custom __eq__.
    if service_type is None:
        service_type = "storage"
    if version is None:
        version = "v1"
    credentials = GoogleCredentials.get_application_default()
    return build(service_type, version, credentials=credentials)
##########################################################################################
# GOOGLE STORAGE API #####################################################################
##########################################################################################
def get_build_params(metadata):
    '''get_build_params uses get_build_metadata to retrieve corresponding
    metadata values for a build, filling in any entry whose value is None.
    :param metadata: a list, each item a dictionary of metadata, in format:
    metadata = [{'key': 'repo_url', 'value': repo_url },
    {'key': 'repo_id', 'value': repo_id },
    {'key': 'credential', 'value': credential },
    {'key': 'response_url', 'value': response_url },
    {'key': 'token', 'value': token},
    {'key': 'commit', 'value': commit }]
    :return: dict mapping each key to its (possibly looked-up) value.
    '''
    params = dict()
    for item in metadata:
        # `is None` instead of `== None`: the idiomatic identity check.
        if item['value'] is None:
            item['value'] = get_build_metadata(key=item['key'])
        params[item['key']] = item['value']
        # Never log sensitive values.
        if item['key'] not in ['token', 'secret', 'credential']:
            bot.info('%s is set to %s' % (item['key'], item['value']))
    return params
def wait_operation(operation):
    """Block until the given zone operation completes.

    Returns True when the finished operation reports no error, else False.
    NOT thread safe (uses module-level project/zone).
    """
    credentials = GoogleCredentials.get_application_default()
    compute = discovery.build('compute', 'v1', credentials=credentials)
    # Poll until the operation reaches DONE, printing a dot per poll.
    while True:
        result = compute.zoneOperations().get(
            project=project,
            zone=zone,
            operation=operation['name']).execute()
        if result['status'] == 'DONE':
            return 'error' not in result
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(2)
# [END wait_operation]
# [START list_instances]
# [END wait_operation]
# [START list_instances]
def list_instances(project, zone, globalinstances, distro, includeterm):
    """Return names of instances matching the prefix/distro naming scheme.

    Returns a non-empty list of names, or False when nothing matches.
    NOT thread safe.
    """
    credentials = GoogleCredentials.get_application_default()
    compute = discovery.build('compute', 'v1', credentials=credentials)
    result = compute.instances().list(project=project, zone=zone).execute()
    if 'items' not in result:
        return False
    print('%s instances in zone %s:' % (project, zone))
    matched = []
    name = prefix + '-' + distro
    if not globalinstances:
        # Scope the name to this machine via a reversed node-id slice.
        name += '-' + format(str(uuid.getnode())[:8:-1])
    for instance in result['items']:
        if name in instance['name']:
            print(' - ' + instance['name'] + ' - ' + instance['status'])
            # Terminated instances are included only on request.
            if instance['status'] == 'RUNNING' or includeterm:
                matched.append(instance['name'])
    return matched if matched else False
# [END list_instances]
# [START check_gceproject]
def _build_request(self, verb, verb_arguments):
"""Builds HttpRequest object.
Args:
verb (str): Request verb (ex. insert, update, delete).
verb_arguments (dict): Arguments to be passed with the request.
Returns:
httplib2.HttpRequest: HttpRequest to be sent to the API.
"""
method = getattr(self._component, verb)
# Python insists that keys in **kwargs be strings (not variables).
# Since we initially build our kwargs as a dictionary where one of the
# keys is a variable (target), we need to convert keys to strings,
# even though the variable in question is of type str.
method_args = {str(k): v for k, v in verb_arguments.iteritems()}
return method(**method_args)
def __init__(self, db_filename=None):
    """Set up the Cloud Natural Language client and result storage."""
    credentials = GoogleCredentials.get_application_default()
    scoped_credentials = credentials.create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    scoped_credentials.authorize(http)
    # NOTE(review): build() receives the unscoped credentials while the
    # scoped ones authorize the http object — preserved as in the original.
    self.service = discovery.build('language', 'v1',
                                   http=http,
                                   credentials=credentials)
    # Entity information gleaned from the image files.
    self.entity_info = []
    # Filename of the sqlite3 database to save to; timestamped default.
    self.db_filename = db_filename or 'entities{}.db'.format(
        int(time.time()))
def main(project_id, job_name):
    """Review the transfer operations associated with a transfer job."""
    credentials = GoogleCredentials.get_application_default()
    storagetransfer = discovery.build(
        'storagetransfer', 'v1', credentials=credentials)
    # Doubled braces emit literal JSON braces through str.format.
    op_filter = (
        '{{"project_id": "{project_id}", '
        '"job_names": ["{job_name}"]}}'
    ).format(project_id=project_id, job_name=job_name)
    result = storagetransfer.transferOperations().list(
        name="transferOperations",
        filter=op_filter).execute()
    print('Result of transferOperations/list: {}'.format(
        json.dumps(result, indent=4, sort_keys=True)))
# [END main]
# [END main]
def __init__(self, source_language: str, target_language: str, key: str,
             translator_name: str = 'Google', quality: int = 50,
             service_name: str = 'Google') -> None:
    """Initialise the Google Translate v2 backend with HTML escaping."""
    super(GoogleTranslator, self).__init__(
        source_language=source_language,
        target_language=target_language,
        service_name=service_name,
        translator_name=translator_name,
        quality=quality
    )
    self.key = key
    self.translation_service = build('translate', 'v2', developerKey=key)
    # Protect markup: escape before sending, unescape on the way back.
    self.add_query_processor(EscapeHtml())
    self.add_response_processor(UnescapeHtml())
def post(self):
    """Translate an event's name and description into the requested language."""
    if self.json_body:
        event_id = self.json_body.get('event_id')
        language = (self.json_body.get('language')
                    or self.json_body.get('locale'))
        if not event_id:
            self.add_error('Need to pass event_id argument')
        if not language:
            self.add_error('Need to pass language/locale argument')
    else:
        self.add_error('Need to pass a post body of json params')
    # Remap our traditional/simplified chinese languages
    if language in ('zh', 'zh-Hant'):
        language = 'zh-TW'
    elif language == 'zh-Hans':
        language = 'zh-CN'
    self.errors_are_fatal()
    db_event = eventdata.DBEvent.get_by_id(event_id)
    service = build('translate', 'v2',
                    developerKey=keys.get('google_server_key'))
    result = service.translations().list(
        target=language, format='text',
        q=[db_event.name or '', db_event.description or '']).execute()
    translations = [x['translatedText'] for x in result['translations']]
    self.write_json_success(
        {'name': translations[0], 'description': translations[1]})
def analize(self, text):
    """Run Cloud Natural Language syntax analysis over `text`."""
    http = httplib2.Http()
    self.scoped_credentials.authorize(http)
    service = discovery.build('language', 'v1beta1', http=http)
    payload = {
        'document': {
            'type': 'PLAIN_TEXT',
            'content': text,
        },
        'features': {
            'extract_syntax': True,
        },
        'encodingType': 'UTF16',
    }
    return service.documents().annotateText(body=payload).execute()
def main(max_results):
    """List ids of unread INBOX+IMPORTANT Gmail messages, following pagination.

    Sets the module-level `service` as a side effect. Returns the
    accumulated message list, or None when the API raises HttpError.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    global service
    service = discovery.build('gmail', 'v1', http=http)
    try:
        user_id = "me"
        label_id = ["INBOX", "IMPORTANT"]
        query = "is:unread"
        response = service.users().messages().list(
            userId=user_id, labelIds=label_id,
            maxResults=max_results, q=query).execute()
        messages = []
        if 'messages' in response:
            messages.extend(response['messages'])
        while 'nextPageToken' in response:
            page_token = response['nextPageToken']
            response = service.users().messages().list(
                userId=user_id, labelIds=label_id,
                maxResults=max_results, q=query,
                pageToken=page_token).execute()
            # Bug fix: the original fetched follow-up pages but never
            # accumulated their messages; collect each page too.
            messages.extend(response.get('messages', []))
        return messages
    except errors.HttpError as error:  # py2/py3-compatible except syntax
        print('An error occurred: %s' % error)
def setUp(self):
    """Build a mocked webmasters service: one data reply, then an empty one."""
    self.clientquery = {
        'property_uri': 'https://www.example.com/',
        'siteMode': 'en-us',
        'clientName': 'Example',
        'query_date': '2016-09-01',
    }
    self.emptyresponse = '''{"responseAggregationType": "byPage"}'''
    self.p = os.path.dirname(os.path.abspath(__file__))
    self.build_response_data = '%s/build_response_data.json' % self.p
    # Canned HTTP replies consumed in order by the client under test.
    self.http_auth = HttpMockSequence([
        ({'status': '200'}, open(self.build_response_data, 'rb').read()),
        ({'status': '200'}, self.emptyresponse.encode('UTF-8')),
    ])
    self.service = build('webmasters',
                         'v3',
                         http=self.http_auth,
                         developerKey='mocked_api_key_1234')
    # Mock the service attribute within Apiclient
    self.mocked_prop = PropertyMock(return_value=self.service)
def setUp(self):
    """Build a mocked webmasters service whose calls return HTTP 403."""
    self.clientquery = {
        'property_uri': 'https://www.example.com/',
        'siteMode': 'en-us',
        'clientName': 'Example',
        'query_date': '2016-09-01',
    }
    self.response = '''{"error": {"errors": [{"domain": "global","reason": "forbidden",
"message": "User does not have sufficient permission for site 'https://www.example.com/'. See also: https://support.google.com/webmasters/answer/2451999."}],
"code": 403,
"message": "User does not have sufficient permission for site 'https://www.example.com/'. See also: https://support.google.com/webmasters/answer/2451999."}}'''
    self.p = os.path.dirname(os.path.abspath(__file__))
    self.build_response_data = '%s/build_response_data.json' % self.p
    # Both canned replies carry a 403 so error handling can be exercised.
    self.http_auth = HttpMockSequence([
        ({'status': '403'}, open(self.build_response_data, 'rb').read()),
        ({'status': '403'}, self.response.encode('UTF-8')),
    ])
    self.service = build('webmasters',
                         'v3',
                         http=self.http_auth,
                         developerKey='mocked_api_key_1234')
    self.mocked_prop = PropertyMock(return_value=self.service)
def flush(self):
    """Write buffered log entries, retrying with growing backoff on failure."""
    if not self.entries:
        return
    for attempt in range(6):
        try:
            self.body['entries'] = self.entries
            self.connection.entries().write(body=self.body).execute()
            self.entries = []
            break
        except IOError as e:
            sleep(attempt * 2 + 1)
            if e.errno == errno.EPIPE:
                # Broken pipe: rebuild the logging client before retrying.
                credentials = GoogleCredentials.get_application_default()
                self.connection = build('logging', 'v2beta1',
                                        credentials=credentials)
        except Exception:
            sleep(attempt * 2 + 5)
def __init__(self, model_name, project_id=None):
    """
    Args:
        model_name: the name of the model. It can be a model full name
            ("projects/[project_id]/models/[model_name]") or just [model_name].
        project_id: project_id of the models. If not provided and model_name
            is not a full name (not including project_id), default project_id
            will be used.
    """
    # Bug fix: the original assigned _project_id, _credentials and _api
    # only inside the `project_id is None` branch, leaving the object
    # broken (AttributeError) whenever a project_id was supplied.
    context = datalab.Context.default()
    if project_id is None:
        project_id = context.project_id
    self._project_id = project_id
    self._credentials = context.credentials
    self._api = discovery.build('ml', 'v1', credentials=self._credentials)
    if not model_name.startswith('projects/'):
        model_name = ('projects/%s/models/%s' % (self._project_id, model_name))
    self._full_model_name = model_name
    self._model_name = self._full_model_name.split('/')[-1]