def prepare_zip():
    from pkg_resources import resource_filename as resource
    from config import config
    from json import dumps
    logger.info('creating/updating gimel.zip')
    with ZipFile('gimel.zip', 'w', ZIP_DEFLATED) as zipf:
        info = ZipInfo('config.json')
        info.external_attr = 0o664 << 16
        zipf.writestr(info, dumps(config))
        zipf.write(resource('gimel', 'config.py'), 'config.py')
        zipf.write(resource('gimel', 'gimel.py'), 'gimel.py')
        zipf.write(resource('gimel', 'logger.py'), 'logger.py')
        for root, dirs, files in os.walk(resource('gimel', 'vendor')):
            for file in files:
                real_file = os.path.join(root, file)
                relative_file = os.path.relpath(real_file,
                                                resource('gimel', ''))
                zipf.write(real_file, relative_file)
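# Resulting archive layout, as implied by the calls above (paths inside gimel.zip):
#   config.json                      - the in-memory config serialized to JSON, mode 0664
#   config.py, gimel.py, logger.py   - copied from the installed gimel package
#   vendor/...                       - every file under the package's vendor directory,
#                                      with paths kept relative to the package root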
def role():
    new_role = False
    try:
        logger.info('finding role')
        iam('get_role', RoleName='gimel')
    except ClientError:
        logger.info('role not found. creating')
        iam('create_role', RoleName='gimel',
            AssumeRolePolicyDocument=ASSUMED_ROLE_POLICY)
        new_role = True
    role_arn = iam('get_role', RoleName='gimel', query='Role.Arn')
    logger.debug('role_arn={}'.format(role_arn))
    logger.info('updating role policy')
    iam('put_role_policy', RoleName='gimel', PolicyName='gimel',
        PolicyDocument=POLICY)
    if new_role:
        from time import sleep
        logger.info('waiting for role policy propagation')
        sleep(5)
    return role_arn
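# The `iam(...)` and `aws_lambda(...)` calls above are a thin project helper around
# boto3, not part of boto3 itself. A minimal sketch of such a helper, assuming boto3
# and jmespath are available and that `query` is a JMESPath expression applied to the
# response (an assumption, not necessarily the project's actual implementation):
def _aws_call(service, operation, query=None, **kwargs):
    import boto3
    import jmespath
    client = boto3.client(service)
    response = getattr(client, operation)(**kwargs)
    # Return the queried fragment when a JMESPath expression is given, else the raw response
    return jmespath.search(query, response) if query else response

# Hypothetical usage mirroring the calls above:
# role_arn = _aws_call('iam', 'get_role', RoleName='gimel', query='Role.Arn')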
def _function_alias(name, version, alias=LIVE):
    try:
        logger.info('creating function alias {0} for {1}:{2}'.format(
            alias, name, version))
        arn = aws_lambda('create_alias',
                         FunctionName=name,
                         FunctionVersion=version,
                         Name=alias,
                         query='AliasArn')
    except ClientError:
        logger.info('alias {0} exists. updating {0} -> {1}:{2}'.format(
            alias, name, version))
        arn = aws_lambda('update_alias',
                         FunctionName=name,
                         FunctionVersion=version,
                         Name=alias,
                         query='AliasArn')
    return arn
def preflight_checks():
    logger.info('checking aws credentials and region')
    if region() is None:
        logger.error('Region is not set up. please run aws configure')
        return False
    try:
        check_aws_credentials()
    except AttributeError:
        logger.error('AWS credentials not found. please run aws configure')
        return False
    logger.info('testing redis')
    try:
        from gimel import _redis
        _redis().ping()
    except redis.exceptions.ConnectionError:
        logger.error('Redis ping failed. Please run gimel configure')
        return False
    return True
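# `check_aws_credentials` is assumed to raise AttributeError when no credentials are
# configured, which is what the except clause above relies on. A minimal sketch using
# boto3 (an assumption about the real helper, not necessarily its implementation):
def check_aws_credentials():
    import boto3
    session = boto3.session.Session()
    # get_credentials() returns None when nothing is configured, so the attribute
    # access below raises AttributeError, which the caller treats as "not found".
    return session.get_credentials().access_key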
def get_channel_local_path(channel_name):
    # TODO: (3.2) The XML should be written to userdata, so two local files get read:
    # the one in userdata and the one next to the .py (shipped with the plugin).
    # The higher of the two is the current version; if no file exists, version 0 is assumed.
    if channel_name != "channelselector":
        local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo")
    else:
        local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo")
    logger.info("tvalacarta.core.updater local_channel_path=" + local_channel_path)
    logger.info("tvalacarta.core.updater local_version_path=" + local_version_path)
    logger.info("tvalacarta.core.updater local_compiled_path=" + local_compiled_path)
    return local_channel_path, local_version_path, local_compiled_path
def downloadpageWithoutCookies(url):
    logger.info("tvalacarta.core.scrapertools Downloading " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
    req.add_header('X-Requested-With', 'XMLHttpRequest')
    try:
        response = urllib2.urlopen(req)
    except:
        # Retry with spaces escaped in the URL
        req = urllib2.Request(url.replace(" ", "%20"))
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
        response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    fin = time.clock()
    logger.info("tvalacarta.core.scrapertools Downloaded in %d seconds " % (fin - inicio + 1))
    return data
def connect(server_name, user, password):
    import smb, nmb
    logger.info("[samba.py] Creating netbios...")
    netbios = nmb.NetBIOS()
    logger.info("[samba.py] Resolving IP...")
    nbhost = netbios.gethostbyname(server_name)
    server_ip = nbhost[0].get_ip()
    logger.info("[samba.py] server_ip=" + server_ip)
    logger.info("[samba.py] Creating smb...")
    remote = smb.SMB(server_name, server_ip)
    logger.info("ok")
    if remote.is_login_required():
        logger.info("[samba.py] Login...")
        if user == "":
            logger.info("[samba.py] Empty user, assuming 'guest'")
            user = "guest"
        remote.login(user, password)
    else:
        logger.info("[samba.py] Login not required")
    return remote
def get_files(url):
    logger.info("[samba.py] get_files")
    # Split the URL into its components
    server_name, share_name, path, user, password = parse_url(url)
    # Connect to the remote server
    remote = connect(server_name, user, password)
    ficheros = []
    for f in remote.list_path(share_name, path + '*'):
        name = f.get_longname()
        # logger.info("[samba.py] name=" + name)
        if name == '.' or name == '..':
            continue
        if f.is_directory():
            continue
        ficheros.append(name)
    return ficheros
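# `parse_url` is not shown above. A minimal sketch of what it is assumed to do for
# smb:// URLs of the form smb://user:password@server/share/path/ (hypothetical helper,
# not the project's actual implementation):
def parse_url(url):
    import re
    match = re.match(r'smb://(?:(?P<user>[^:@/]*)(?::(?P<password>[^@/]*))?@)?'
                     r'(?P<server>[^/]+)/(?P<share>[^/]+)/(?P<path>.*)', url)
    user = match.group('user') or ""
    password = match.group('password') or ""
    return match.group('server'), match.group('share'), match.group('path'), user, password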
def already_suscribed(item):
    logger.info("suscription.already_suscribed item=" + item.tostring())
    current_suscriptions = _read_suscription_file()
    # Check if the subscription is already on file
    existe = False
    for suscription_item in current_suscriptions:
        logger.info("suscription.already_suscribed suscription_item=" + suscription_item.tostring())
        if suscription_item.url == item.url:
            existe = True
            break
    logger.info("suscription.already_suscribed -> " + repr(existe))
    return existe
def unpack(source):
    """Unpacks P.A.C.K.E.R. packed js code."""
    payload, symtab, radix, count = _filterargs(source)
    logger.info("payload=" + repr(payload))
    logger.info("symtab=" + repr(symtab))
    logger.info("radix=" + repr(radix))
    logger.info("count=" + repr(count))
    if count != len(symtab):
        raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
    try:
        unbase = Unbaser(radix)
    except TypeError:
        raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')

    def lookup(match):
        """Look up symbols in the synthetic symtab."""
        word = match.group(0)
        return symtab[unbase(word)] or word

    source = re.sub(r'\b\w+\b', lookup, payload)
    return _replacestrings(source)
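# A companion check often paired with unpack(): a detector for the P.A.C.K.E.R.
# signature. This is a minimal sketch under the assumption that packed sources start
# with the usual eval(function(p,a,c,k,e,...) preamble; it is not necessarily the
# detector shipped with this module.
def detect(source):
    """Detects whether `source` looks like P.A.C.K.E.R. packed code."""
    import re
    return bool(re.search(r"eval\s*\(\s*function\s*\(\s*p\s*,\s*a\s*,\s*c\s*,\s*k\s*,\s*e\s*,",
                          source))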
def get_channel_local_path(channel_name):
    if channel_name != "channelselector":
        local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo")
    else:
        local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo")
    logger.info("local_channel_path=" + local_channel_path)
    logger.info("local_version_path=" + local_version_path)
    logger.info("local_compiled_path=" + local_compiled_path)
    return local_channel_path, local_version_path, local_compiled_path
def get_channel_local_path(channel_name):
    # TODO: (3.2) The XML should be written to userdata, so two local files get read:
    # the one in userdata and the one next to the .py (shipped with the plugin).
    # The higher of the two is the current version; if no file exists, version 0 is assumed.
    if channel_name != "channelselector":
        local_channel_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), 'channels', channel_name + ".pyo")
    else:
        local_channel_path = os.path.join(config.get_runtime_path(), channel_name + ".py")
        local_version_path = os.path.join(config.get_runtime_path(), channel_name + ".xml")
        local_compiled_path = os.path.join(config.get_runtime_path(), channel_name + ".pyo")
    logger.info("streamondemand-pureita.core.updater local_channel_path=" + local_channel_path)
    logger.info("streamondemand-pureita.core.updater local_version_path=" + local_version_path)
    logger.info("streamondemand-pureita.core.updater local_compiled_path=" + local_compiled_path)
    return local_channel_path, local_version_path, local_compiled_path
def downloadpageWithoutCookies(url):
    logger.info("streamondemand-pureita.core.scrapertools Downloading " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
    req.add_header('X-Requested-With', 'XMLHttpRequest')
    try:
        response = urllib2.urlopen(req)
    except:
        # Retry with spaces escaped in the URL
        req = urllib2.Request(url.replace(" ", "%20"))
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
        response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    fin = time.clock()
    logger.info("streamondemand-pureita.core.scrapertools Downloaded in %d seconds " % (fin - inicio + 1))
    return data
def download(url, package_name):
    logger.info("pyload_client.download url=" + url + ", package_name=" + package_name)
    session = login(config.get_setting("pyload_user"), config.get_setting("pyload_password"))
    package_id = find_package_id(package_name)
    if package_id is None:
        api_url = urlparse.urljoin(config.get_setting("pyload"), "/api/addPackage")
        logger.info("pyload_client.download api_url=" + api_url)
        data = scrapertools.cache_page(api_url, post=urllib.urlencode({"name": "'" + package_name + "'", "links": str([url])}))
        logger.info("pyload_client.download data=" + data)
    else:
        api_url = urlparse.urljoin(config.get_setting("pyload"), "/api/addFiles")
        logger.info("pyload_client.download api_url=" + api_url)
        data = scrapertools.cache_page(api_url, post=urllib.urlencode({"pid": str(package_id), "links": str([url])}))
        logger.info("pyload_client.download data=" + data)
    return
def _run_record_validation(self, schema_name, table_name, tablemeta,
                           columnmeta, validation_rules,
                           skip_record_validation):
    log.info(_("Record validation: start"))
    if skip_record_validation:
        log.info(_("Record validation: skipping"))
        return
    if not validation_rules:
        log.info(_("Record validation: no validation rule"))
        return
    validation = self.run_record_validation(schema_name, table_name,
                                            validation_rules)
    assert isinstance(validation, dict)
    for col in tablemeta.column_names:
        if validation and col in validation:
            columnmeta[col].validation = validation[col]
    log.info(_("Record validation: end"))
def run_postscan_validation(self, schema_name, table_name, tablemeta,
                            columnmeta, table_data, validation_rules):
    if not validation_rules:
        return table_data
    v = DbProfilerValidator.DbProfilerValidator(table_data['schema_name'],
                                                table_data['table_name'],
                                                self, validation_rules)
    log.info(_("Column statistics validation: start"))
    validated1, failed1 = v.validate_table(table_data)
    log.info(_("Column statistics validation: end (%d)") % validated1)
    log.info(_("SQL validation: start"))
    validated2, failed2 = v.validate_sql(self.dbdriver)
    log.info(_("SQL validation: end (%d)") % validated2)
    v.update_table_data(table_data)
    return table_data
def get(self):
    jsondata = u""
    try:
        data_all = []
        cursor = self._conn.cursor()
        for r in cursor.execute("SELECT * FROM repo"):
            data_all.append(json.loads(unicode(r[4])))
        log.info(_("Retrieved all data from the repository `%s'.") %
                 self.filename)
    except Exception as e:
        log.error(_("Could not retrieve from the repository `%s'") %
                  self.filename, detail=unicode(e))
        return None
    return data_all
def get_datamap_source_tables(self, database_name, schema_name,
                              table_name):
    """Get source table names from the data mapping info.

    Args:
        database_name (str):
        schema_name (str):
        table_name (str):

    Returns:
        list: a list of source table names.
    """
    assert database_name and schema_name and table_name
    tables = []
    for d in self.get_datamap_items(database_name, schema_name,
                                    table_name):
        if d['source_table_name'] not in tables:
            tables.append(d['source_table_name'])
    return tables
def verify(self):
    repo = DbProfilerRepository.DbProfilerRepository(self.repofile)
    repo.open()
    log.info(_("Verifying the validation results."))
    table_list = repo.get_table_list()
    valid = 0
    invalid = 0
    for t in table_list:
        table = repo.get_table(t[0], t[1], t[2])
        v, i = verify_table(table)
        valid += v
        invalid += i
    if invalid == 0:
        log.info(_("No invalid results: %d/%d") % (invalid, valid + invalid))
    else:
        log.info(_("Invalid results: %d/%d") % (invalid, valid + invalid))
    repo.close()
    return invalid > 0
def export_json(repo, tables=[], output_path='./json'):
    json_data = []
    try:
        f = open(output_path + "/EXPORT.JSON", "a")
        for tab in tables:
            database_name = tab[0]
            schema_name = tab[1]
            table_name = tab[2]
            data = repo.get_table(database_name, schema_name, table_name)
            json_data.append(data)
        f.write(json.dumps(json_data, indent=2).encode('utf-8'))
        f.close()
        log.info(_("Generated JSON file."))
    except IOError:
        log.error(_("Could not generate JSON file."))
        sys.exit(1)
    return True
def preflight():
    logger.info('running preflight checks')
    preflight_checks()
def deploy(preflight):
    if preflight:
        logger.info('running preflight checks')
        if not preflight_checks():
            return
    logger.info('deploying')
    run()
    js_code_snippet()
def configure():
    from config import config, config_filename, generate_config
    if not config:
        logger.info('generating new config {}'.format(config_filename))
        generate_config(config_filename)
    click.edit(filename=config_filename)
def rollback_lambda(name, alias=LIVE):
    all_versions = _versions(name)
    live_version = _get_version(name, alias)
    try:
        live_index = all_versions.index(live_version)
        if live_index < 1:
            raise RuntimeError('Cannot find previous version')
        prev_version = all_versions[live_index - 1]
        logger.info('rolling back to version {}'.format(prev_version))
        _function_alias(name, prev_version)
    except RuntimeError as error:
        logger.error('Unable to rollback. {}'.format(repr(error)))
def deploy_api(api_id):
    logger.info('deploying API')
    return apigateway('create_deployment', restApiId=api_id,
                      description='gimel deployment',
                      stageName='prod',
                      stageDescription='gimel production',
                      cacheClusterEnabled=False,
                      query='id')
def create_update_lambda(role_arn, wiring):
    name, handler, memory, timeout = (wiring[k] for k in ('FunctionName',
                                                          'Handler',
                                                          'MemorySize',
                                                          'Timeout'))
    try:
        logger.info('finding lambda function')
        function_arn = aws_lambda('get_function',
                                  FunctionName=name,
                                  query='Configuration.FunctionArn')
    except ClientError:
        function_arn = None
    if not function_arn:
        logger.info('creating new lambda function {}'.format(name))
        with open('gimel.zip', 'rb') as zf:
            function_arn, version = aws_lambda('create_function',
                                               FunctionName=name,
                                               Runtime='python2.7',
                                               Role=role_arn,
                                               Handler=handler,
                                               MemorySize=memory,
                                               Timeout=timeout,
                                               Publish=True,
                                               Code={'ZipFile': zf.read()},
                                               query='[FunctionArn, Version]')
    else:
        logger.info('updating lambda function {}'.format(name))
        with open('gimel.zip', 'rb') as zf:
            function_arn, version = aws_lambda('update_function_code',
                                               FunctionName=name,
                                               Publish=True,
                                               ZipFile=zf.read(),
                                               query='[FunctionArn, Version]')
    function_arn = _function_alias(name, version)
    _cleanup_old_versions(name)
    logger.debug('function_arn={} ; version={}'.format(function_arn, version))
    return function_arn
def create_update_api(role_arn, function_arn, wiring):
    logger.info('creating or updating api /{}'.format(wiring['pathPart']))
    api_id = get_create_api()
    resource_id = resource(api_id, wiring['pathPart'])
    uri = function_uri(function_arn, region())
    api_method(api_id, resource_id, role_arn, uri, wiring)
    cors(api_id, resource_id)
def _load_config(config_filename):
    try:
        with open(config_filename) as config_file:
            logger.info('Using config {}'.format(config_filename))
            return config_file.name, json.load(config_file)
    except IOError:
        logger.debug('trying to load {} (not found)'.format(config_filename))
        return config_filename, {}
def pull_repo(self, force=False):
    """Clone the repo to the specified dir. Delete the repo if it currently exists, unless reusing."""
    try:
        helpers.create_path(self.paths['absolute_path'], True)
        if force:
            self.delete_repo()
        if not os.path.exists(self.paths['repo_path']):
            logger.info("Starting Repo Cloning", track=self.track)
            output, rc = helpers.run(
                "git clone -b %s %s" % (self.branch, self.url),
                self.paths['absolute_path'],
                self.dryrun)
            if rc > 0:
                self.delete_repo()
                logger.error("Pulling_repo", error=output, path=self.paths['repo_path'])
                return -1
            return 1
        else:
            return 0
    except Exception as e:
        logger.errorout("Pulling_repo", err_msg=e.message,
                        error="Error pulling repo", path=self.paths['repo_path'])