def get_projects(self, resource_name, parent_id=None, parent_type=None,
**filterargs):
"""Get all the projects the authenticated account has access to.
If no parent is passed in, then all projects the caller has visibility
to are returned. This is significantly less efficient than listing by
parent.
Args:
resource_name (str): The resource type.
parent_id (str): The id of the organization or folder parent object.
parent_type (str): Either folder or organization.
**filterargs (dict): Extra project filter args.
Yields:
dict: The projects.list() response.
https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#response-body
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
filters = []
for key, value in filterargs.items():
filters.append('{}:{}'.format(key, value))
if parent_id:
filters.append('parent.id:{}'.format(parent_id))
if parent_type:
filters.append('parent.type:{}'.format(parent_type))
try:
for response in self.repository.projects.list(
filter=' '.join(filters)):
yield response
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_name, e)
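# Usage sketch (not part of the original snippet): iterating the paged
# projects.list() responses yielded above. The client variable name
# `crm_client` and the parent values are assumptions for illustration.
try:
    for response in crm_client.get_projects(
            'project', parent_id='1234567890', parent_type='organization'):
        for project in response.get('projects', []):
            print(project.get('projectId'), project.get('lifecycleState'))
except api_errors.ApiExecutionError as e:
    # HttpError/HttpLib2Error from the API call surface here, already wrapped.
    print('Listing projects failed: %s' % e)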
# Python HttpLib2Error() class usage examples.
# Source: cloud_resource_manager.py, project forseti-security, author GoogleCloudPlatform.
def get_project_ancestry(self, project_id):
"""Get the full folder ancestry for a project.
Args:
project_id (str): Either the project number or the project id.
Returns:
list: The ancestors of the project, in order from direct parent to
root organization.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
try:
results = self.repository.projects.get_ancestry(project_id)
return results.get('ancestor', [])
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
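# Usage sketch (assumed wrapper instance `crm_client`): each ancestry entry
# carries a resourceId dict with 'type' and 'id' fields, so the owning
# organization can be picked out of the returned list.
for ancestor in crm_client.get_project_ancestry('my-example-project'):
    resource_id = ancestor.get('resourceId', {})
    if resource_id.get('type') == 'organization':
        print('Owning organization id: %s' % resource_id.get('id'))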
def get_project_iam_policies(self, resource_name, project_id):
"""Get all the iam policies of given project numbers.
Args:
resource_name (str): The resource type.
project_id (str): Either the project number or the project id.
Returns:
list: IAM policies of the project.
https://cloud.google.com/resource-manager/reference/rest/Shared.Types/Policy
"""
try:
return self.repository.projects.get_iam_policy(project_id)
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_name, e)
def get_organization(self, org_name):
"""Get organization by org_name.
Args:
org_name (str): The org name with format "organizations/$ORG_ID"
Returns:
dict: The org resource as returned by the API.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
name = self.repository.organizations.get_name(org_name)
try:
return self.repository.organizations.get(name)
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(org_name, e)
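# Usage sketch (assumed wrapper instance `crm_client`): fetching an
# organization by its resource name and reading common fields.
org = crm_client.get_organization('organizations/1234567890')
print(org.get('displayName'), org.get('lifecycleState'))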
def get_folders(self, resource_name, parent=None, show_deleted=False):
"""Find all folders that the authenticated account has access to.
If no parent is passed in, then all folders the caller has visibility
to are returned. This is significantly less efficient than listing by
parent.
Args:
resource_name (str): The resource type.
parent (str): Optional parent resource, either
'organizations/{org_id}' or 'folders/{folder_id}'.
show_deleted (bool): Determines if deleted folders should be
returned in the results.
Returns:
list: A list of Folder dicts as returned by the API.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
if parent:
paged_results = self.repository.folders.list(
parent, showDeleted=show_deleted)
else:
query = ''
if not show_deleted:
query = 'lifecycleState=ACTIVE'
paged_results = self.repository.folders.search(query=query)
try:
return api_helpers.flatten_list_results(paged_results, 'folders')
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_name, e)
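# Usage sketch (assumed wrapper instance `crm_client`): listing the active
# folders directly under an organization; omitting `parent` would fall back
# to the slower search path shown above.
try:
    folders = crm_client.get_folders('folder', parent='organizations/1234567890')
    for folder in folders:
        print(folder.get('name'), folder.get('displayName'))
except api_errors.ApiExecutionError as e:
    print('Listing folders failed: %s' % e)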
def get_billing_info(self, project_id):
"""Gets the biling information for a project.
Args:
project_id (int): The project id for a GCP project.
Returns:
dict: A ProjectBillingInfo resource.
https://cloud.google.com/billing/reference/rest/v1/ProjectBillingInfo
{
"name": string,
"projectId": string,
"billingAccountName": string,
"billingEnabled": boolean,
}
Raises:
ApiExecutionError: ApiExecutionError is raised if the call to the
GCP Cloud Billing API fails.
"""
try:
name = self.repository.projects.get_name(project_id)
return self.repository.projects.get_billing_info(name)
except (errors.HttpError, HttpLib2Error) as e:
LOGGER.warn(api_errors.ApiExecutionError(project_id, e))
raise api_errors.ApiExecutionError('billing_info', e)
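# Usage sketch (assumed billing client instance `billing_client`): checking
# whether billing is enabled via the ProjectBillingInfo fields documented above.
info = billing_client.get_billing_info('my-example-project')
if info.get('billingEnabled'):
    print('Billing account: %s' % info.get('billingAccountName'))
else:
    print('Billing is not enabled on this project.')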
def get_bucket_iam_policy(self, bucket):
"""Gets the IAM policy for a bucket.
Args:
bucket (str): The bucket to fetch the policy for.
Returns:
dict: The IAM policies for the bucket.
"""
try:
return self.repository.buckets.get_iam_policy(bucket)
except (errors.HttpError, HttpLib2Error) as e:
LOGGER.warn(api_errors.ApiExecutionError(bucket, e))
raise api_errors.ApiExecutionError('bucketIamPolicy', e)
def get_object_iam_policy(self, bucket, object_name):
"""Gets the IAM policy for an object.
Args:
bucket (str): The bucket to fetch the policy for.
object_name (str): The object name to fetch the policy for.
Returns:
dict: The IAM policies for the object.
"""
try:
return self.repository.objects.get_iam_policy(bucket, object_name)
except (errors.HttpError, HttpLib2Error) as e:
LOGGER.warn(api_errors.ApiExecutionError(bucket, e))
raise api_errors.ApiExecutionError('objectIamPolicy', e)
def get_service_account_iam_policy(self, name):
"""Get IAM policy associated with a service account.
Args:
name (str): The service account name to query, must be in the format
projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}
Returns:
dict: The IAM policies for the service account.
"""
try:
return self.repository.projects_serviceaccounts.get_iam_policy(name)
except (errors.HttpError, HttpLib2Error) as e:
LOGGER.warn(api_errors.ApiExecutionError(name, e))
raise api_errors.ApiExecutionError('serviceAccountIamPolicy', e)
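# Usage sketch: the get_*_iam_policy helpers above all return a standard IAM
# Policy dict; the storage client instance name and bucket are assumptions.
policy = storage_client.get_bucket_iam_policy('my-example-bucket')
for binding in policy.get('bindings', []):
    print(binding.get('role'), binding.get('members', []))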
def updatePhoto(users):
cd = buildGAPIObject(API.DIRECTORY)
filenamePattern = getString(Cmd.OB_PHOTO_FILENAME_PATTERN)
checkForExtraneousArguments()
p = re.compile(u'^(ht|f)tps?://.*$')
i, count, users = getEntityArgument(users)
for user in users:
i += 1
user, userName, _ = splitEmailAddressOrUID(user)
filename = _substituteForUser(filenamePattern, user, userName)
if p.match(filename):
try:
status, image_data = httplib2.Http(disable_ssl_certificate_validation=GC.Values[GC.NO_VERIFY_SSL]).request(filename, u'GET')
if status[u'status'] != u'200':
entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], Msg.NOT_ALLOWED, i, count)
continue
if status[u'content-location'] != filename:
entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], Msg.NOT_FOUND, i, count)
continue
except (httplib2.HttpLib2Error, httplib2.ServerNotFoundError, httplib2.CertificateValidationUnsupported) as e:
entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], str(e), i, count)
continue
else:
image_data = readFile(filename, mode=u'rb', continueOnError=True, displayError=True)
if image_data is None:
entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], None, i, count)
continue
body = {u'photoData': base64.urlsafe_b64encode(image_data)}
try:
callGAPI(cd.users().photos(), u'update',
throw_reasons=[GAPI.USER_NOT_FOUND, GAPI.FORBIDDEN, GAPI.INVALID_INPUT],
userKey=user, body=body, fields=u'')
entityActionPerformed([Ent.USER, user, Ent.PHOTO, filename], i, count)
except GAPI.invalidInput as e:
entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], str(e), i, count)
except (GAPI.userNotFound, GAPI.forbidden):
entityUnknownWarning(Ent.USER, user, i, count)
# gam <UserTypeEntity> delete photo
def testHttpFailure(self, mock_run_flow, mock_storage):
mock_storage.return_value = mock_store = mock.MagicMock()
mock_store.get.return_value = None
mock_run_flow.side_effect = httplib2.HttpLib2Error
output = _GetCommandOutput('fetch', self.json_args + ['userinfo.email'])
self.assertIn('Communication error creating credentials', output)
self.assertEqual(1, mock_store.get.call_count)
self.assertEqual(0, self.mock_test.call_count)
def _GetCredentialsVia3LO(client_info, credentials_filename=None):
credential_store = _GetCredentialStore(credentials_filename,
client_info['client_id'],
client_info['scope'])
credentials = credential_store.get()
if credentials is None or credentials.invalid:
for _ in range(10):
# If authorization fails, we want to retry, rather
# than let this cascade up and get caught elsewhere.
# If users want out of the retry loop, they can ^C.
try:
flow = client.OAuth2WebServerFlow(**client_info)
flags, _ = tools.argparser.parse_known_args(
['--noauth_local_webserver'])
credentials = tools.run_flow(
flow, credential_store, flags)
break
except (SystemExit, client.FlowExchangeError) as e:
# Here SystemExit is "no credential at all", and the
# FlowExchangeError is "invalid" -- usually because
# you reused a token.
pass
except httplib2.HttpLib2Error as e:
raise ValueError(
'Communication error creating credentials:'
'{}'.format(e))
else:
credentials = None
return credentials
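# Invocation sketch for the 3LO flow above; the client id/secret values are
# placeholders and the credentials file name is an assumption.
client_info = {
    'client_id': 'YOUR_CLIENT_ID',
    'client_secret': 'YOUR_CLIENT_SECRET',
    'scope': 'https://www.googleapis.com/auth/userinfo.email',
}
credentials = _GetCredentialsVia3LO(client_info, credentials_filename='credentials.dat')
if credentials is not None:
    authorized_http = credentials.authorize(httplib2.Http())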
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
resp, content = _retry_request(
http, num_retries, 'media download', self._sleep, self._rand, self._uri,
'GET', headers=headers)
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
elif 'content-length' in resp:
self._total_size = int(resp['content-length'])
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
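# Typical consumption loop for next_chunk() (a sketch; `downloader` is assumed
# to be a MediaIoBaseDownload-style object built around an open file handle).
done = False
while not done:
    status, done = downloader.next_chunk(num_retries=3)
    if status:
        print('Download %d%% complete.' % int(status.progress() * 100))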
def execute(self, http=None, num_retries=0):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http=http, num_retries=num_retries)
return body
# Non-resumable case.
if 'content-length' not in self.headers:
self.headers['content-length'] = str(self.body_size)
# If the request URI is too long then turn it into a POST request.
if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
self.method = 'POST'
self.headers['x-http-method-override'] = 'GET'
self.headers['content-type'] = 'application/x-www-form-urlencoded'
parsed = urlparse(self.uri)
self.uri = urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
None)
)
self.body = parsed.query
self.headers['content-length'] = str(len(self.body))
# Handle retries for server-side errors.
resp, content = _retry_request(
http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
method=str(self.method), body=self.body, headers=self.headers)
for callback in self.response_callbacks:
callback(resp)
if resp.status >= 300:
raise HttpError(resp, content, uri=self.uri)
return self.postproc(resp, content)
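# Sketch of calling execute() with retries and handling both error classes the
# docstring names; how `request` was built (for example from a discovery
# service object) is outside this snippet.
import httplib2
from googleapiclient.errors import HttpError

try:
    result = request.execute(num_retries=3)
    print(result)
except HttpError as e:
    print('API responded with an error status: %s' % e)
except httplib2.HttpLib2Error as e:
    print('Transport-level failure: %s' % e)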
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry 500's with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
for retry_num in xrange(num_retries + 1):
if retry_num > 0:
self._sleep(self._rand() * 2**retry_num)
logging.warning(
'Retry #%d for media download: GET %s, following status: %d'
% (retry_num, self._uri, resp.status))
resp, content = http.request(self._uri, headers=headers)
if resp.status < 500:
break
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
def _execute(self, http, order, requests):
"""Serialize batch request, send to server, process response.
Args:
http: httplib2.Http, an http object to be used to make the request with.
order: list, list of request ids in the order they were added to the
batch.
requests: list, list of request objects to send.
Raises:
httplib2.HttpLib2Error if a transport error has occurred.
apiclient.errors.BatchError if the response is the wrong format.
"""
message = MIMEMultipart('mixed')
# Message should not write out its own headers.
setattr(message, '_write_headers', lambda self: None)
# Add all the individual requests.
for request_id in order:
request = requests[request_id]
msg = MIMENonMultipart('application', 'http')
msg['Content-Transfer-Encoding'] = 'binary'
msg['Content-ID'] = self._id_to_header(request_id)
body = self._serialize_request(request)
msg.set_payload(body)
message.attach(msg)
body = message.as_string()
headers = {}
headers['content-type'] = ('multipart/mixed; '
'boundary="%s"') % message.get_boundary()
resp, content = http.request(self._batch_uri, method='POST', body=body,
headers=headers)
if resp.status >= 300:
raise HttpError(resp, content, uri=self._batch_uri)
# Now break out the individual responses and store each one.
boundary, _ = content.split(None, 1)
# Prepend with a content-type header so FeedParser can handle it.
header = 'content-type: %s\r\n\r\n' % resp['content-type']
for_parser = header + content
parser = FeedParser()
parser.feed(for_parser)
mime_response = parser.close()
if not mime_response.is_multipart():
raise BatchError("Response not in multipart/mixed format.", resp=resp,
content=content)
for part in mime_response.get_payload():
request_id = self._header_to_id(part['Content-ID'])
response, content = self._deserialize_response(part.get_payload())
self._responses[request_id] = (response, content)
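# Public-API sketch of the batching that _execute() implements internally,
# following the standard googleapiclient pattern; the `service` object and the
# Directory API calls are illustrative assumptions.
def handle_batch_response(request_id, response, exception):
    if exception is not None:
        print('Request %s failed: %s' % (request_id, exception))
    else:
        print('Request %s succeeded.' % request_id)

batch = service.new_batch_http_request(callback=handle_batch_response)
batch.add(service.users().get(userKey='user1@example.com'))
batch.add(service.users().get(userKey='user2@example.com'))
batch.execute()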
def doGAMCheckForUpdates(forceCheck=False):
import calendar
def _gamLatestVersionNotAvailable():
if forceCheck:
systemErrorExit(NETWORK_ERROR_RC, Msg.GAM_LATEST_VERSION_NOT_AVAILABLE)
current_version = __version__
now_time = calendar.timegm(time.gmtime())
if forceCheck:
check_url = GAM_ALL_RELEASES # includes pre-releases
else:
last_check_time_str = readFile(GM.Globals[GM.LAST_UPDATE_CHECK_TXT], continueOnError=True, displayError=False)
last_check_time = int(last_check_time_str) if last_check_time_str and last_check_time_str.isdigit() else 0
if last_check_time > now_time-604800:
return
check_url = GAM_LATEST_RELEASE # latest full release
try:
_, c = httplib2.Http(disable_ssl_certificate_validation=GC.Values[GC.NO_VERIFY_SSL]).request(check_url, u'GET', headers={u'Accept': u'application/vnd.github.v3.text+json'})
try:
release_data = json.loads(c)
except ValueError:
_gamLatestVersionNotAvailable()
return
if isinstance(release_data, list):
release_data = release_data[0] # only care about latest release
if not isinstance(release_data, dict) or u'tag_name' not in release_data:
_gamLatestVersionNotAvailable()
return
latest_version = release_data[u'tag_name']
if latest_version[0].lower() == u'v':
latest_version = latest_version[1:]
if forceCheck or (latest_version > current_version):
printKeyValueList([u'Version Check', None])
Ind.Increment()
printKeyValueList([u'Current', current_version])
printKeyValueList([u' Latest', latest_version])
Ind.Decrement()
if latest_version <= current_version:
writeFile(GM.Globals[GM.LAST_UPDATE_CHECK_TXT], str(now_time), continueOnError=True, displayError=forceCheck)
return
announcement = release_data.get(u'body_text', u'No details about this release')
writeStderr(u'\nGAM %s release notes:\n\n' % latest_version)
writeStderr(announcement)
try:
printLine(Msg.HIT_CONTROL_C_TO_UPDATE)
time.sleep(15)
except KeyboardInterrupt:
import webbrowser
webbrowser.open(release_data[u'html_url'])
printLine(Msg.GAM_EXITING_FOR_UPDATE)
sys.exit(0)
writeFile(GM.Globals[GM.LAST_UPDATE_CHECK_TXT], str(now_time), continueOnError=True, displayError=forceCheck)
return
except (httplib2.HttpLib2Error, httplib2.ServerNotFoundError, httplib2.CertificateValidationUnsupported):
return
def next_chunk(self, num_retries=0):
"""Get the next chunk of the download.
Args:
num_retries: Integer, number of times to retry with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
resp, content = _retry_request(
http, num_retries, 'media download', self._sleep, self._rand, self._uri,
'GET', headers=headers)
if resp.status in [200, 206]:
if 'content-location' in resp and resp['content-location'] != self._uri:
self._uri = resp['content-location']
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
elif 'content-length' in resp:
self._total_size = int(resp['content-length'])
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, uri=self._uri)
def execute(self, http=None, num_retries=0):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
num_retries: Integer, number of times to retry with randomized
exponential backoff. If all retries fail, the raised HttpError
represents the last request. If zero (default), we attempt the
request only once.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
googleapiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http=http, num_retries=num_retries)
return body
# Non-resumable case.
if 'content-length' not in self.headers:
self.headers['content-length'] = str(self.body_size)
# If the request URI is too long then turn it into a POST request.
# Assume that a GET request never contains a request body.
if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
self.method = 'POST'
self.headers['x-http-method-override'] = 'GET'
self.headers['content-type'] = 'application/x-www-form-urlencoded'
parsed = urlparse(self.uri)
self.uri = urlunparse(
(parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
None)
)
self.body = parsed.query
self.headers['content-length'] = str(len(self.body))
# Handle retries for server-side errors.
resp, content = _retry_request(
http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
method=str(self.method), body=self.body, headers=self.headers)
for callback in self.response_callbacks:
callback(resp)
if resp.status >= 300:
raise HttpError(resp, content, uri=self.uri)
return self.postproc(resp, content)