def mainlist(item):
    """Build the list of bookmarked ("favoritos") entries.

    Reads every bookmark file found in BOOKMARK_PATH (a local directory or
    a samba share) and returns a list of playable Item objects, one per
    bookmark file that could be parsed.
    """
    logger.info("[favoritos.py] mainlist")
    itemlist=[]
    # Build a listing from the bookmark entries
    if usingsamba(BOOKMARK_PATH):
        ficheros = samba.get_files(BOOKMARK_PATH)
    else:
        ficheros = os.listdir(BOOKMARK_PATH)
    # Sort by file name (which reflects insertion order)
    ficheros.sort()
    # Fill the listing
    for fichero in ficheros:
        try:
            # Read the bookmark
            canal,titulo,thumbnail,plot,server,url,fulltitle = readbookmark(fichero)
            if canal=="":
                canal="favoritos"
            # Create the entry.
            # "extra" carries the full file path so the entry can be deleted later.
            ## <-- added fulltitle with the movie title
            itemlist.append( Item( channel=canal , action="play" , url=url , server=server, title=fulltitle, thumbnail=thumbnail, plot=plot, fanart=thumbnail, extra=os.path.join( BOOKMARK_PATH, fichero ), fulltitle=fulltitle, folder=False ))
        except:
            # Best effort: log the failure and skip the unreadable bookmark
            for line in sys.exc_info():
                logger.error( "%s" % line )
    return itemlist
# Example source snippets of error() usage in Python (scraped section header)
def connect(self):
    """Open the database connection if one is not already open.

    Aborts the whole process (exit status 1) when the connection attempt
    fails; otherwise returns True once a connection is available.
    """
    # Guard clause: an existing connection is reused as-is.
    if self.dbconn is not None:
        return True
    log.info(_("Connecting the database."))
    try:
        self.dbdriver.connect()
    except DbProfilerException as e:
        log.error(_("Could not connect to the database."),
                  detail=e.source)
        log.error(_("Abort."))
        sys.exit(1)
    self.dbconn = self.dbdriver.conn
    log.info(_("Connected to the database."))
    return True
def _query_column_profile(self, column_names, query):
    """Common code shared by PostgreSQL/MySQL/Oracle/MSSQL profilers
    to collect column profiles of the table.
    Args:
        column_names(list): column names.
        query(str): a query string to be executed on each database.
    Returns:
        tuple: (num_rows, minmax, nulls)
            minmax and nulls are dictionaries having column names as
            the keys.
    """
    _minmax = {}
    _nulls = {}
    num_rows = None
    try:
        rs = self.dbdriver.q2rs(query)
        # The profiling query must return exactly one row laid out as:
        # [num_rows, nulls_1, min_1, max_1, nulls_2, min_2, max_2, ...]
        assert len(rs.resultset) == 1
        a = copy.copy(list(rs.resultset[0]))
        num_rows = a.pop(0)
        log.trace("_query_column_profile: rows %d" % num_rows)
        i = 0
        # Consume the remaining values in (nulls, min, max) triples,
        # one triple per entry of column_names, in order.
        while len(a) > 0:
            nulls = a.pop(0)
            colmin = a.pop(0)
            colmax = a.pop(0)
            log.trace(("_query_column_profile: col %s %d %s %s" %
                       (column_names[i], nulls, colmin, colmax)))
            _minmax[column_names[i]] = [colmin, colmax]
            _nulls[column_names[i]] = nulls
            i += 1
    except QueryError as e:
        # Propagate after logging so the caller can abort profiling.
        log.error(_("Could not get row count/num of "
                    "nulls/min/max values."),
                  detail=e.value, query=query)
        raise e
    log.trace("_query_column_profile: %s" % str(_minmax))
    return (num_rows, _minmax, _nulls)
def init(self):
    """Create and initialize the repository file.

    An already-existing repository is left untouched and treated as
    success.

    Returns:
        bool: True on success (or when nothing needed doing),
            False when the repository could not be created.
    """
    try:
        if os.path.exists(self.filename):
            log.info(_("The repository already exists."))
            return True
        self.__init_sqlite3(self.filename)
    except Exception as e:
        log.error(_("Could not create the repository."), detail=unicode(e))
        return False
    log.info(_("The repository has been initialized."))
    return True
def destroy(self):
    """Remove the repository file from disk.

    A missing file is not considered an error.

    Returns:
        bool: True on success, False when the unlink failed.
    """
    try:
        if os.path.exists(self.filename):
            os.unlink(self.filename)
    except Exception as e:
        log.error(_("Could not destroy the repository."),
                  detail=unicode(e))
        return False
    log.info(_("The repository has been destroyed."))
    return True
def set(self, data):
    """Replace the entire repository content with *data*.

    All existing rows are deleted first, then every element of *data*
    is stored via append_table().

    Args:
        data(list): records to store.
    Returns:
        bool: False when clearing the repository fails, True otherwise.
            NOTE(review): return values of append_table() are not
            checked here — a failed append still yields True; confirm
            whether that is intended.
    """
    try:
        cursor = self._conn.cursor()
        cursor.execute("DELETE FROM repo")
        self._conn.commit()
    except Exception as e:
        log.error(_("Could not initialize the repository."),
                  detail=unicode(e))
        return False
    for d in data:
        self.append_table(d)
    return True
def get_table_list(self, database_name=None, schema_name=None,
                   table_name=None):
    """Return distinct [database, schema, table] name triples from the repo.

    Args:
        database_name(str): optional filter on the database name.
        schema_name(str): optional filter on the schema name.
        table_name(str): optional filter on the table name.
    Returns:
        list: list of 3-element lists [database, schema, table],
            or None when the query failed.
    """
    table_list = []
    cond = []
    params = []
    # Use bound (qmark) parameters instead of interpolating the names
    # into the SQL string: a name containing a quote would otherwise
    # break the statement (and allows SQL injection).
    if database_name:
        cond.append("database_name = ?")
        params.append(database_name)
    if schema_name:
        cond.append("schema_name = ?")
        params.append(schema_name)
    if table_name:
        cond.append("table_name = ?")
        params.append(table_name)
    where = "WHERE (%s)" % " AND ".join(cond) if cond else ''
    query = """
SELECT DISTINCT database_name, schema_name, table_name
  FROM repo
{0}
 ORDER BY database_name, schema_name, table_name
""".format(where)
    log.trace("get_table_list: query = %s" % query)
    try:
        cursor = self._conn.cursor()
        for r in cursor.execute(query, params):
            table_list.append([r[0], r[1], r[2]])
    except Exception as e:
        log.error(_("Could not get data."), detail=unicode(e))
        return None
    return table_list
def validate_record(self, column_names, column_values):
    """Run every record validator against one record.

    Args:
        column_names(list): column names of the record.
        column_values(list): values, positionally aligned with
            column_names.
    Returns:
        bool: True when every validator passed, False when at least
            one failed (or raised ValidationError).
    """
    validated_count = 0
    failed_count = 0
    assert len(column_names) == len(column_values)
    # new record validator
    for label in self.record_validators:
        validator = self.record_validators[label]
        validated_count += 1
        try:
            if validator.validate(column_names, column_values) is False:
                log.trace("VALIDATION FAILED: %s %s %s %s" %
                          (validator.label, unicode(validator.rule),
                           validator.column_names, unicode(column_values)))
                # Count the failure against the rule's first column
                self._column_counter.incr(validator.rule[0],
                                          validator.label)
                failed_count += 1
            else:
                log.trace("VALIDATION OK: %s %s %s %s" %
                          (validator.label, unicode(validator.rule),
                           validator.column_names, unicode(column_values)))
        except ValidationError as e:
            # A validator that raises counts as a failed validation.
            log.error(u'%s' % e.value)
            log.trace("VALIDATION FAILED: %s %s %s %s" %
                      (validator.label, unicode(validator.rule),
                       validator.column_names, unicode(column_values)))
            self._column_counter.incr(validator.rule[0], validator.label)
            failed_count += 1
            continue
    if failed_count > 0:
        return False
    return True
def validate_table(self, table_data):
    """Run all statistics validators against a table's profile data.

    Args:
        table_data: profiled table statistics passed to each validator.
    Returns:
        tuple: (validated_count, failed_count)
    """
    validated_count = 0
    failed_count = 0
    # Run statistics validators.
    for label in self.statistics_validators:
        validator = self.statistics_validators[label]
        log.info(_("Validating column statistics: %s") %
                 '; '.join(validator.rule))
        validated_count += 1
        try:
            res = validator.validate(table_data)
        except ValidationError as e:
            # A raising validator is treated the same as a failing one.
            log.error(u'%s' % e.value)
            res = False
        if res is False:
            log.trace("VALIDATION FAILED: %s %s %s" %
                      (validator.label, unicode(validator.rule),
                       validator.column_names))
            self._column_counter.incr(validator.rule[0], validator.label)
            failed_count += 1
        else:
            log.trace("VALIDATION OK: %s %s %s" %
                      (validator.label, unicode(validator.rule),
                       validator.column_names))
    return (validated_count, failed_count)
def validate_sql(self, dbdriver):
    """Run all SQL-based validators using the given database driver.

    Args:
        dbdriver: database driver each validator executes its SQL on.
    Returns:
        tuple: (validated_count, failed_count)
    Raises:
        DriverError: when dbdriver is None.
    """
    if dbdriver is None:
        raise DriverError(u'Database driver not found.')
    validated_count = 0
    failed_count = 0
    for label in self.sql_validators:
        validator = self.sql_validators[label]
        log.info(_("Validating with SQL: %s") % '; '.join(validator.rule))
        validated_count += 1
        try:
            res = validator.validate(dbdriver)
        except ValidationError as e:
            # Count the error as a failure and move on to the next rule.
            log.error(_("SQL validation error: %s") %
                      '; '.join(validator.rule),
                      detail=e.source.value if e.source else None)
            self._column_counter.incr(validator.rule[0], validator.label)
            failed_count += 1
            continue
        if res is False:
            self._column_counter.incr(validator.rule[0], validator.label)
            failed_count += 1
    return (validated_count, failed_count)
def export_file(filename, body):
    """Write *body*, UTF-8 encoded, to *filename*.

    Args:
        filename: destination path.
        body: unicode text to write.
    Returns:
        bool: True on success, False on an I/O error.
    """
    try:
        # "with" guarantees the handle is closed even when write() fails;
        # the original leaked the file object on a write error.
        with open(filename, "w") as f:
            f.write(body.encode('utf-8'))
        log.info(_("Generated %s.") % filename)
    except IOError as e:
        log.error(_("Could not generate %s: %s") % (filename, unicode(e)))
        return False
    return True
def __save_mutual_fund_obj_strategy_portion(self, mutual_fund_inst, get_parameter_dict):
    """Fetch and store the fund's "Objective and Strategy" text.

    Queries the Morningstar investObjAndStrategy endpoint with the
    ticker/region parameters from get_parameter_dict and stores the
    scraped text on mutual_fund_inst.inve_objective_strategy (empty
    string when scraping fails).
    """
    logger.info(
        "__save_mutual_fund_obj_strategy_portion() function entry. {'get_parameter_dict': %s}" % get_parameter_dict)
    # Get mutual fund objective and strategy portion
    query_args = {"url": "http://financials.morningstar.com/fund/investObjAndStrategy.html?",
                  "t": get_parameter_dict["t"],
                  "region": get_parameter_dict["region"],
                  "culture": get_parameter_dict["culture"],
                  "cur": get_parameter_dict["cur"],
                  "productCode": get_parameter_dict["productCode"]}
    request = urllib2.Request(self.mutual_fund_info_url + "?" + urllib.urlencode(query_args))
    # Spoof a browser user agent; the endpoint may reject the default one
    request.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36")
    logger.debug("Http request: %s" % request.get_full_url())
    response = urllib2.urlopen(request)
    mutual_fund_info_obj_strategy_soup = BeautifulSoup(response.read(), "html.parser")
    # Save Objective and Strategy
    try:
        div_tag_list = mutual_fund_info_obj_strategy_soup.find_all("div")
        # NOTE(review): assumes the second <div> holds the strategy text --
        # a page-layout change would silently break this; confirm.
        mutual_fund_inst.inve_objective_strategy = unicode(div_tag_list[1].string).lstrip().rstrip()
        logger.debug("Save fund objective and strategy: %s" % mutual_fund_inst.inve_objective_strategy)
    except:
        # Fall back to an empty string rather than failing the whole scrape
        mutual_fund_inst.inve_objective_strategy = ""
        logger.error("Error reading Invest Objective Strategy of fund %s" % mutual_fund_inst.fund_name)
def download_file(host, file_path, file_name):
    """Download *file_name* from *file_path* on FTP *host* (anonymous login).

    The file is saved under the same name in the current working
    directory. All FTP errors are logged, never raised.
    """
    try:
        ftp = ftplib.FTP(host)
        ftp.login()
        ftp.cwd(file_path)
        # "with" closes the local file even if the transfer fails;
        # the original opened it inline and leaked the handle on error.
        with open(file_name, 'wb') as fp:
            ftp.retrbinary('RETR ' + file_path + file_name, fp.write)
        logger.success('Download: %s' % host + file_path + file_name)
        ftp.quit()
    except ftplib.all_errors as e:
        logger.error('%s\nCannot download file: %s.' % (e, host + file_path + file_name))
def get_response(url):
    """Open *url* and return the urllib2 response object.

    HTTP/URL errors are re-raised for the caller to handle; any other
    exception is logged (and the function returns None implicitly).
    """
    try:
        return urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError) as error:
        # Known network errors are the caller's responsibility.
        raise error
    except Exception as error:
        # BUG FIX: the original formatted a 2-tuple into a single %s
        # ("Exception: %s" % (traceback.format_exc(), error)), which
        # raised TypeError inside the handler. Log both values.
        logger.error('Exception: %s %s' % (traceback.format_exc(), error))
def get_ssh_client(hostname, ssh_hostname):
    """Tries to create ssh client
    Create ssh client based on the username and ssh key

    Connects to *ssh_hostname* with the credentials held in CREDS.
    Authentication/host-key failures abort immediately (return None);
    any other exception is retried up to MAX_SSH_RETRIES times with a
    1-second pause between attempts.

    Returns:
        paramiko.SSHClient on success, None on any failure.
    """
    if not CREDS.SSH_KEYFILE:
        logger.errorout("ssh_keyfile not set",
                        module=COMMAND_MODULE_CUSTOM)
    retries = 0
    while retries < MAX_SSH_RETRIES:
        try:
            ssh = paramiko.SSHClient()
            ssh.load_system_host_keys()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname=ssh_hostname,
                        username=CREDS.SSH_USER,
                        port=CREDS.SSH_PORT,
                        pkey=CREDS.PK,
                        timeout=CONNECTION_TIMEOUT)
            return ssh
        except paramiko.BadAuthenticationType:
            # Server does not accept our auth type -- retrying won't help
            logger.error("BadAuthenticationType",
                         hostname=hostname,
                         module=COMMAND_MODULE_CUSTOM)
            return
        except paramiko.AuthenticationException:
            # Wrong credentials -- retrying won't help
            logger.error("Authentication failed",
                         hostname=hostname,
                         module=COMMAND_MODULE_CUSTOM)
            return
        except paramiko.BadHostKeyException:
            logger.error("BadHostKeyException",
                         fix="Edit known_hosts file to remove the entry",
                         hostname=hostname,
                         module=COMMAND_MODULE_CUSTOM)
            return
        except paramiko.SSHException:
            logger.error("SSHException",
                         hostname=hostname,
                         module=COMMAND_MODULE_CUSTOM)
            return
        except Exception as e:
            # Transient errors: log only once, then keep retrying
            if retries == 0:
                logger.error("Problems connecting to host",
                             hostname=hostname,
                             module=COMMAND_MODULE_CUSTOM,
                             error=e.message)
            retries += 1
            time.sleep(1)
    # All retries exhausted
    logger.error("Can not connect to host",
                 hostname=hostname,
                 module=COMMAND_MODULE_CUSTOM)
    return None
def downloadIfNotModifiedSince(url,timestamp):
    """Fetch *url* only if it changed since *timestamp* (epoch seconds).

    Sends a conditional GET with an If-Modified-Since header; an HTTP
    304 reply means "not modified".

    Returns:
        tuple: (updated, data). updated is True when new content was
            downloaded; data holds the body ("" when nothing was fetched).
    """
    logger.info("tvalacarta.core.downloadtools downloadIfNotModifiedSince("+url+","+time.ctime(timestamp)+")")
    # Format the timestamp as a GMT date for the HTTP header
    fechaFormateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
    logger.info("fechaFormateada=%s" % fechaFormateada)
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('If-Modified-Since', fechaFormateada)
    req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')
    updated = False
    # BUG FIX: "data" was only assigned on some code paths; a 304 reply
    # (or any HTTP error carrying a code) reached "return updated,data"
    # with "data" unbound and raised NameError. Initialize it up front.
    data = ""
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        # Reaching this point means the resource has changed
        updated = True
        response.close()
    except urllib2.URLError as e:
        # A 304 status code means the resource has not changed
        if hasattr(e,'code'):
            logger.info("Codigo de respuesta HTTP : %d" % e.code)
            if e.code == 304:
                logger.info("No ha cambiado")
                updated = False
        # Any other error coming from the remote server
        else:
            for line in sys.exc_info():
                logger.error( "%s" % line )
    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin-inicio+1))
    return updated,data
# Download history
def unescape(text):
    """Removes HTML or XML character references
    and entities from a text string.
    keep &, >, < in the source code.
    from Fredrik Lundh
    http://effbot.org/zone/re-sub.htm#unescape-html
    """
    def fixup(m):
        # m matches one entity/reference like "&#241;", "&#xF1;" or "&ntilde;"
        text = m.group(0)
        if text[:2] == "&#":
            # character reference (decimal "&#NNN;" or hex "&#xHH;")
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16)).encode("utf-8")
                else:
                    return unichr(int(text[2:-1])).encode("utf-8")
            except ValueError:
                # malformed number: leave the reference untouched
                logger.info("error de valor")
                pass
        else:
            # named entity
            try:
                '''
                if text[1:-1] == "amp":
                    text = "&amp;"
                elif text[1:-1] == "gt":
                    text = "&gt;"
                elif text[1:-1] == "lt":
                    text = "&lt;"
                else:
                    print text[1:-1]
                    text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
                '''
                import htmlentitydefs
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
            except KeyError:
                # unknown entity name: leave it untouched
                logger.info("keyerror")
                pass
            except:
                pass
        return text # leave as is
    return re.sub("&#?\w+;", fixup, text)
# Converts HTML entity codes (e.g. "&ntilde;") into the corresponding UTF-8 unicode character (e.g. "ñ")
def update_json_data(dict_node, name_file, node, path=None):
    """
    Update one node of a JSON settings file with the given dictionary.
    @param dict_node: dictionary holding the node's new content
    @type dict_node: dict
    @param name_file: either a channel/server name (without extension)
        or a JSON file name (with extension)
    @type name_file: str
    @param node: name of the node to update
    @param path: base path of the JSON file. Defaults to the
        settings_channels directory.
    @return result: True when the file was written correctly, False on error
    @rtype: bool
    @return json_data: the serialized JSON that was written
        (a string produced by dump_json, despite the name)
    """
    logger.info()
    from core import config
    from core import filetools
    json_data = {}
    result = False
    # Channel/server names get the conventional "_data.json" suffix
    if not name_file.endswith(".json"):
        name_file += "_data.json"
    if not path:
        path = filetools.join(config.get_data_path(), "settings_channels")
    fname = filetools.join(path, name_file)
    try:
        data = filetools.read(fname)
        dict_data = load_json(data)
        # the file parsed into a dict
        if dict_data:
            if node in dict_data:
                logger.debug(" existe el key %s" % node)
                dict_data[node] = dict_node
            else:
                logger.debug(" NO existe el key %s" % node)
                new_dict = {node: dict_node}
                dict_data.update(new_dict)
        else:
            # empty/unparseable file: start a fresh dict with just this node
            logger.debug(" NO es un dict")
            dict_data = {node: dict_node}
        json_data = dump_json(dict_data)
        result = filetools.write(fname, json_data)
    except:
        # best effort: report the failure and return result=False
        logger.error("No se ha podido actualizar %s" % fname)
    return result, json_data
def downloadbest(video_urls, title, continuar=False):
    """Try to download the best-quality option from *video_urls*.

    Iterates the options from highest to lowest quality and returns as
    soon as one download succeeds.

    @param video_urls: list of (title, url) options, lowest quality first
    @param title: title used to derive the destination file name
    @param continuar: resume a partial download when True
    @return: 0 on success, -1 when the user cancelled, -2 when every
        option failed
    """
    logger.info()
    # Reverse the list to try the highest quality first
    # ( list() makes a copy so the caller's list is untouched )
    invertida = list(video_urls)
    invertida.reverse()
    for elemento in invertida:
        # videotitle = elemento[0]
        url = elemento[1]
        logger.info("Descargando opción " + title + " " + url.encode('ascii', 'ignore'))
        # Work out the file to write to
        try:
            fullpath = getfilefromtitle(url, title.strip())
        # If this fails, the URL is useless -- try the next option
        except:
            import traceback
            logger.error(traceback.format_exc())
            continue
        # Download
        try:
            ret = downloadfile(url, fullpath, continuar=continuar)
        # Reaching here is normally a timeout
        except urllib2.URLError, e:
            import traceback
            logger.error(traceback.format_exc())
            ret = -2
        # The user cancelled the download
        if ret == -1:
            return -1
        else:
            # The file does not even exist
            if not os.path.exists(fullpath):
                logger.info("-> No ha descargado nada, probando con la siguiente opción si existe")
            # The file exists
            else:
                tamanyo = os.path.getsize(fullpath)
                # Zero-sized download: discard it and try the next option
                if tamanyo == 0:
                    logger.info("-> Descargado un fichero con tamaño 0, probando con la siguiente opción si existe")
                    os.remove(fullpath)
                else:
                    # Non-empty file: accept it as a good download
                    logger.info("-> Descargado un fichero con tamaño %d, lo da por bueno" % tamanyo)
                    return 0
    # Every option failed
    return -2
def extract(self, file, dir, folder_to_extract="", overwrite_question=False, backup=False):
logger.info("file=%s" % file)
logger.info("dir=%s" % dir)
if not dir.endswith(':') and not os.path.exists(dir):
os.mkdir(dir)
zf = zipfile.ZipFile(file)
if not folder_to_extract:
self._createstructure(file, dir)
num_files = len(zf.namelist())
for name in zf.namelist():
logger.info("name=%s" % name)
if not name.endswith('/'):
logger.info("no es un directorio")
try:
(path,filename) = os.path.split(os.path.join(dir, name))
logger.info("path=%s" % path)
logger.info("name=%s" % name)
if folder_to_extract:
if path != os.path.join(dir, folder):
break
else:
os.makedirs( path )
except:
pass
if folder_to_extract:
outfilename = os.path.join(dir, filename)
else:
outfilename = os.path.join(dir, name)
logger.info("outfilename=%s" % outfilename)
try:
if os.path.exists(outfilename) and overwrite_question:
from platformcode import platformtools
dyesno = platformtools.dialog_yesno("El archivo ya existe",
"El archivo %s a descomprimir ya existe" \
", ¿desea sobrescribirlo?" \
% os.path.basename(outfilename))
if not dyesno:
break
if backup:
import time
import shutil
hora_folder = "Copia seguridad [%s]" % time.strftime("%d-%m_%H-%M", time.localtime())
backup = os.path.join(config.get_data_path(), 'backups', hora_folder, folder_to_extract)
if not os.path.exists(backup):
os.makedirs(backup)
shutil.copy2(outfilename, os.path.join(backup, os.path.basename(outfilename)))
outfile = open(outfilename, 'wb')
outfile.write(zf.read(name))
except:
logger.error("Error en fichero "+name)