def __init__(self, name, context=None):
    """Initializes an instance of a CloudML Job.

    Args:
      name: the name of the job. It can be an operation full name
          ("projects/[project_id]/jobs/[operation_name]") or just
          [operation_name].
      context: an optional Context object providing project_id and credentials.
    """
    super(Job, self).__init__(name)
    # Fall back to the default Datalab context when none was supplied.
    self._context = context if context is not None else datalab.Context.default()
    self._api = discovery.build('ml', 'v1', credentials=self._context.credentials)
    # Expand a bare operation name into a fully qualified resource path.
    if not name.startswith('projects/'):
        name = '/'.join(['projects', self._context.project_id, 'jobs', name])
    self._name = name
    self._refresh_state()
# Example usages of discovery.build()  (translated scrape header; was Chinese)
def delete(config, disk_name=None, disk_zone=None):  # TODO: implement
    """Deletes the named persistent disk in one or more GCE zones.

    Submits a ``disks().delete`` request per zone and polls the resulting
    zone operation until it reports DONE (or its status can no longer be
    fetched).

    Args:
        config: object providing ``project_id`` and a comma-separated
            ``zones`` string.
        disk_name: name of the disk to delete.
        disk_zone: optional single zone; when omitted, every zone listed in
            ``config.zones`` is targeted.

    Raises:
        DataDiskError: if a delete request is rejected by the API.
    """
    import time  # local import: only needed for the polling back-off

    projectId = config.project_id
    # BUG FIX: the original comprehension
    #   [disk_zone if disk_zone is not None else x for x in config.zones.split(',')]
    # produced one duplicate of disk_zone per configured zone, so the disk was
    # deleted on the first pass and every repeat raised DataDiskError.
    if disk_zone is not None:
        zones = [disk_zone]
    else:
        zones = config.zones.split(',')
    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    if credentials.access_token_expired:
        credentials.refresh(http)
    gce = discovery.build('compute', 'v1', http=http)
    for z in zones:
        try:
            resp = gce.disks().delete(project=projectId, zone=z, disk=disk_name).execute()
        except HttpError as e:
            raise DataDiskError("Couldn't delete data disk {n}: {reason}".format(n=disk_name, reason=e))
        # Poll the zone operation; a failed status lookup is treated as
        # "operation gone", matching the original best-effort behavior.
        while True:
            try:
                result = gce.zoneOperations().get(
                    project=projectId, zone=z, operation=resp['name']).execute()
            except HttpError:
                break
            if result['status'] == 'DONE':
                break
            time.sleep(1)  # avoid a tight busy-wait against the API
def create(apiName, apiVersion):
    """Builds an authorized client for the given API name and version."""
    creds = GoogleCredentials.get_application_default()
    authorized_http = creds.authorize(httplib2.Http())
    # Proactively refresh when the access token has already expired.
    if creds.access_token_expired:
        creds.refresh(authorized_http)
    return discovery.build(apiName, apiVersion, authorized_http)
def get_gcloud_storage():
    """Returns a Cloud Storage v1 client using application-default credentials."""
    return discovery.build(
        'storage', 'v1',
        credentials=GoogleCredentials.get_application_default())
def search(q, max_results=10):
    """Searches YouTube for videos matching the query term.

    Args:
        q: free-text query string.
        max_results: maximum number of results to request (default 10,
            preserving the original behavior).

    Returns:
        The raw ``search.list`` API response dict.
    """
    # SECURITY NOTE(review): an API key is hard-coded in source; it should be
    # loaded from configuration or the environment instead of being committed.
    DEVELOPER_KEY = "AIzaSyACCLlnn_hlOpNk5XUBpRqs-iZWpbTm-J4"
    YOUTUBE_API_SERVICE_NAME = "youtube"
    YOUTUBE_API_VERSION = "v3"
    youtube = build(
        YOUTUBE_API_SERVICE_NAME,
        YOUTUBE_API_VERSION,
        developerKey=DEVELOPER_KEY
    )
    # Call the search.list method to retrieve results matching the specified
    # query term.
    search_response = youtube.search().list(
        q=q,
        part="id,snippet",
        maxResults=max_results,
        type='video',
    ).execute()
    return search_response
def create_service():
    """Builds a Content API client authorized via the App Engine service account."""
    credentials = appengine.AppAssertionCredentials(SCOPE)
    authorized_http = credentials.authorize(httplib2.Http())
    # Force a token fetch up front so the first API call doesn't pay for it.
    credentials.refresh(authorized_http)
    return discovery.build(
        'content', 'v1',
        http=authorized_http,
        discoveryServiceUrl=DISCOVERY_URL)
def get_client_from_credentials(credentials):
    """Creates Pub/Sub client from a given credentials and returns it."""
    if credentials.create_scoped_required():
        # Narrow the credentials to the Pub/Sub scopes.
        credentials = credentials.create_scoped(PUBSUB_SCOPES)
    authorized_http = httplib2.Http(memcache)
    credentials.authorize(authorized_http)
    return discovery.build('pubsub', 'v1', http=authorized_http)
def __init__(self, api_discovery_file='vision_api.json'):
    """Creates a Vision v1 client with application-default credentials.

    Args:
        api_discovery_file: kept for interface compatibility; the client is
            built from DISCOVERY_URL, not from this file.
    """
    self.credentials = GoogleCredentials.get_application_default()
    self.service = discovery.build(
        'vision', 'v1',
        credentials=self.credentials,
        discoveryServiceUrl=DISCOVERY_URL)
def _create_client(self):
    """Returns a fresh Vision v1 API client built with default credentials."""
    default_credentials = GoogleCredentials.get_application_default()
    return discovery.build(
        'vision', 'v1',
        credentials=default_credentials,
        discoveryServiceUrl=DISCOVERY_URL)
def __init__(self, api_discovery_file='vision_api.json'):
    """Builds the Vision v1 service client for this instance.

    Args:
        api_discovery_file: retained for interface compatibility only; the
            service is constructed via DISCOVERY_URL.
    """
    creds = GoogleCredentials.get_application_default()
    self.credentials = creds
    self.service = discovery.build(
        'vision', 'v1', credentials=creds,
        discoveryServiceUrl=DISCOVERY_URL)
def main(input_dir):
    """Walk through all the not-yet-processed image files in the given
    directory, extracting any text from them and adding that text to an
    inverted index.
    """
    # Client for the Vision API and the inverted index to populate/query.
    vision = VisionApi()
    index = Index()
    # Recursively gather every file path under the input directory.
    all_paths = [
        os.path.join(folder, filename)
        for folder, _subs, files in os.walk(input_dir)
        for filename in files
    ]
    # Keep only files the index has not processed yet.
    pending = [p for p in all_paths if not index.document_is_processed(p)]
    # Hand the pending files to the OCR pipeline in batches.
    for chunk in batch(pending):
        get_text_from_files(vision, index, chunk)
# [END get_text]
def get_vision_service():
    """Builds a Vision v1 client using application-default credentials."""
    default_creds = GoogleCredentials.get_application_default()
    return discovery.build(
        'vision', 'v1',
        credentials=default_creds,
        discoveryServiceUrl=DISCOVERY_URL)
# [END get_vision_service]
# [START identify_landmark]
def get_speech_service():
    """Returns a Speech v1beta1 client authorized with a service-account key.

    NOTE(review): the key-file path is hard-coded; consider making it
    configurable.
    """
    scoped_creds = GoogleCredentials.from_stream(
        'api/googleapi_auth/LecRec-a4f4c7931558.json').create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    authorized_http = httplib2.Http()
    scoped_creds.authorize(authorized_http)
    return discovery.build('speech', 'v1beta1', http=authorized_http)
# [END authenticating]
def fetch(self, **kwargs):
    ''' Fetches an email using the Gmail API users.messages.get()
    method. It leverages the IsThisLegit service account to impersonate
    the user in order to retrieve the email by message ID. This prevents
    users from having to manually accept the OAuth permission dialog before
    reporting phishing emails.

    Expected kwargs:
    userId - The userID who reported the email
    messageId - The Gmail message ID to fetch
    '''
    userId = kwargs.get('userId')
    messageId = kwargs.get('messageId')
    # Impersonate the reporting user via domain-wide delegation of the
    # service-account credentials.
    delegated = ServiceAccountCredentials.from_json_keyfile_name(
        config['gae']['service_account_key'],
        scopes=['https://www.googleapis.com/auth/gmail.readonly']
    ).create_delegated(userId)
    gmail = build('gmail', 'v1', http=delegated.authorize(Http()))
    response = gmail.users().messages().get(
        userId=userId, id=messageId, format='raw').execute()
    if not response or 'raw' not in response:
        raise EmailFetchError('Error fetching email: User {}, thread {}'.
                              format(userId, messageId))
    # The API returns the raw RFC 2822 message base64url-encoded.
    return base64.urlsafe_b64decode(str(response['raw']))
def main(argv):
    """Applies labels from a JSON file to this GCE instance and its disks.

    Args:
        argv: command-line args; argv[1] is the path to a JSON labels file.

    Exits with status 1 when the labels file is missing or not valid JSON.
    """
    # Load label file
    try:
        new_labels = json.load(open(argv[1]))
    except IndexError:
        # BUG FIX: the usage message misspelled "labels" as "lables".
        print("%s <labels.json> required!" % __file__, file=sys.stderr)
        sys.exit(1)
    except ValueError as err:
        print("%s invalid json: %s" % (sys.argv[1], err), file=sys.stderr)
        sys.exit(1)
    # Pull defaults (project, zone, instance name) from the metadata server.
    metadata = get_metadata()
    project, zone = itemgetter(1, 3)(metadata['zone'].split("/"))
    instance_name = metadata['name']
    # Google Creds
    creds = GoogleCredentials.get_application_default()
    # Hoist the access token: the original re-fetched it for every label() call.
    access_token = creds.get_access_token().access_token
    # Describe Instance
    conn = discovery.build('compute', 'beta', credentials=creds)
    instance = conn.instances().get(project=project, zone=zone,
                                    instance=instance_name).execute()
    # Label Instance
    label(instance['selfLink'], access_token,
          label_merge(instance.get('labels', {}),
                      instance["labelFingerprint"], new_labels))
    # Label Disks
    for attached in instance['disks']:
        # Skip local disk (no 'source' URL).
        if 'source' not in attached:
            continue
        disk = conn.disks().get(project=project, zone=zone,
                                disk=attached['source'].split('/')[-1]).execute()
        label(disk['selfLink'], access_token,
              label_merge(disk.get('labels', {}),
                          disk["labelFingerprint"], new_labels))
def __get__(self, instance, instance_type):
    """Construct the API client.

    Descriptor protocol: returns a discovery client for self.service /
    self.version, cached per thread (and per webapp2 app registry when in a
    request context) because httplib2-based clients are not threadsafe.
    """
    # Accessed on the class itself rather than an instance: return the
    # descriptor object unchanged, per the usual descriptor convention.
    if instance is None:
        return self
    thread_local = None
    try:
        app = webapp2.get_app()
        # Python Google API clients aren't threadsafe as they use httplib2
        # which isn't threadsafe.
        thread_local = app.registry.get(self)
        if thread_local is None:
            # First access for this app: register a fresh thread-local slot
            # keyed by this descriptor instance.
            thread_local = threading.local()
            app.registry[self] = thread_local
    except AssertionError:
        # When not in a request context, use class thread local.
        thread_local = ThreadsafeClientLocal._class_thread_local
    # Build the client at most once per thread; subsequent accesses on the
    # same thread reuse the cached instance.
    cached_client = getattr(thread_local, 'api', None)
    if cached_client is None:
        credentials = client.GoogleCredentials.get_application_default()
        if credentials.create_scoped_required():
            credentials = credentials.create_scoped(
                'https://www.googleapis.com/auth/cloud-platform')
        cached_client = discovery.build(
            self.service,
            self.version,
            http=credentials.authorize(self.http),
            cache_discovery=self.cache_discovery)
        thread_local.api = cached_client
    return cached_client
def get_google_auth(service, version='v2'):
    """Returns an authorized client for *service* at the given API *version*."""
    return discovery.build(
        service, version,
        credentials=GoogleCredentials.get_application_default())
def get(self):
    """Cron-triggered handler that launches a templated Dataflow job."""
    # App Engine sets the X-Appengine-Cron header only for genuine cron calls.
    is_cron = self.request.headers.get('X-Appengine-Cron', False)
    # logging.info("is_cron is %s", is_cron)
    # Comment out the following check to allow non-cron-initiated requests.
    if not is_cron:
        # BUG FIX: the original `return 'Blocked.'` never reached the client —
        # webapp2 does not treat a returned str as a response body. Write it
        # to the response explicitly instead.
        self.response.write('Blocked.')
        return
    # These env vars are set in app.yaml.
    PROJECT = os.environ['PROJECT']
    BUCKET = os.environ['BUCKET']
    TEMPLATE = os.environ['TEMPLATE_NAME']
    # Because we're using the same job name each time, if you try to launch one
    # job while another is still running, the second will fail.
    JOBNAME = PROJECT + '-twproc-template'
    credentials = GoogleCredentials.get_application_default()
    service = build('dataflow', 'v1b3', credentials=credentials)
    BODY = {
        "jobName": "{jobname}".format(jobname=JOBNAME),
        "gcsPath": "gs://{bucket}/templates/{template}".format(
            bucket=BUCKET, template=TEMPLATE),
        # Per-run timestamp parameter so repeated launches are distinguishable.
        "parameters": {"timestamp": str(datetime.datetime.utcnow())},
        "environment": {
            "tempLocation": "gs://{bucket}/temp".format(bucket=BUCKET),
            "zone": "us-central1-f"
        }
    }
    dfrequest = service.projects().templates().create(
        projectId=PROJECT, body=BODY)
    dfresponse = dfrequest.execute()
    logging.info(dfresponse)
    self.response.write('Done')
def __init__(self, account_json):
    """Creates a Cloud DNS v1 client from a service-account JSON key file.

    Args:
        account_json: path to the service-account key file; its 'project_id'
            field is also stored on the instance.
    """
    dns_scopes = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
    creds = ServiceAccountCredentials.from_json_keyfile_name(account_json, dns_scopes)
    self.dns = discovery.build('dns', 'v1', credentials=creds, cache_discovery=False)
    # The project ID lives alongside the key material in the same file.
    with open(account_json) as account:
        self.project_id = json.load(account)['project_id']
def cloud_service(credentials, service, version='v1'):
    """Builds a client for *service*/*version* using the supplied credentials."""
    api_client = discovery.build(service, version, credentials=credentials)
    return api_client