def get_heartbeat_msgs(most_recent=None):
    # These two expressions are SQL CASE conditional expressions, later
    # used within count(distinct ...) aggregates for the query.
    internal_expr = case([(Record.external == False, Record.machine_id), ]).label('internal_count')
    external_expr = case([(Record.external == True, Record.machine_id), ]).label('external_count')
    q = db.session.query(Build.build, db.func.count(db.distinct(internal_expr)), db.func.count(db.distinct(external_expr)))
    q = q.join(Record).join(Classification)
    q = q.filter(Classification.classification == "org.clearlinux/heartbeat/ping")
    q = q.filter(Record.os_name == 'clear-linux-os')
    q = q.group_by(Build.build)
    if most_recent:
        interval_sec = 24 * 60 * 60 * int(most_recent)
        current_time = time()
        sec_in_past = current_time - interval_sec
        q = q.filter(Record.tsp > sec_in_past)
    q = q.order_by(cast(Build.build, db.Integer))
    return q.all()
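
The query above folds two conditional counts into one GROUP BY pass: each CASE yields the machine id only for rows matching one side of the boolean flag, and COUNT(DISTINCT ...) then ignores the NULLs produced by the other side. Below is a minimal, self-contained sketch of the same idiom, with a hypothetical Ping model, an in-memory SQLite database, and SQLAlchemy 1.3/1.4 (where the list form of case() used here is accepted).

from sqlalchemy import Boolean, Column, Integer, String, case, create_engine, distinct, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Ping(Base):  # hypothetical stand-in for the Build/Record join above
    __tablename__ = 'ping'
    id = Column(Integer, primary_key=True)
    build = Column(String)
    machine_id = Column(String)
    external = Column(Boolean)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Ping(build='100', machine_id='a', external=False),
                 Ping(build='100', machine_id='a', external=False),   # duplicate machine
                 Ping(build='100', machine_id='b', external=True)])

internal = case([(Ping.external == False, Ping.machine_id)]).label('internal_count')
external = case([(Ping.external == True, Ping.machine_id)]).label('external_count')
rows = (session.query(Ping.build,
                      func.count(distinct(internal)),
                      func.count(distinct(external)))
        .group_by(Ping.build).all())
print(rows)  # [('100', 1, 1)]: the duplicate internal ping collapses under DISTINCT
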
def seed_identifier(cls):  # @NoSelf
    '''Returns data_identifier if the latter is not None, else net.sta.loc.cha built by
    querying the related channel and station.'''
    # Note: for what 'sel' below does, see the correlated subquery relationship hybrid example:
    # http://docs.sqlalchemy.org/en/latest/orm/extensions/hybrid.html#correlated-subquery-relationship-hybrid
    # Notes:
    # - we use limit(1) because we might get more than one result. Regardless of why that
    #   happens (because we don't join or apply a distinct?), what matters to us is the first
    #   result which has the requested network + station and location + channel strings.
    # - the label(...) at the end makes all the difference. The doc is, as always, unclear:
    #   http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.label
    dot = text("'.'")
    sel = select([concat(Station.network, dot, Station.station, dot,
                         Channel.location, dot, Channel.channel)]).\
        where((Channel.id == cls.channel_id) & (Station.id == Channel.station_id)).limit(1).\
        label('seedidentifier')
    return case([(cls.data_identifier.isnot(None), cls.data_identifier)],
                else_=sel)
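
A sketch of the same mechanism in isolation: a correlated scalar subquery, once given a .label(), can sit in the else_ of a case(), so the fallback value is computed in SQL only when data_identifier is NULL. The table and column names below are hypothetical, and the list form of case()/select() assumes SQLAlchemy 1.3/1.4.

from sqlalchemy import case, select
from sqlalchemy.sql import column, table

seg = table('segments', column('data_identifier'), column('channel_id'))
cha = table('channels', column('id'), column('code'))

# correlated scalar subquery: note the trailing .label(), as in seed_identifier above
fallback = select([cha.c.code]).where(cha.c.id == seg.c.channel_id).limit(1).label('seedid')
expr = case([(seg.c.data_identifier.isnot(None), seg.c.data_identifier)], else_=fallback)
print(select([expr]))
# roughly: SELECT CASE WHEN segments.data_identifier IS NOT NULL
#          THEN segments.data_identifier ELSE (SELECT ... LIMIT 1) END AS seedid FROM segments
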
def hcase(cond, if_true, if_false):
    # Hybrid "case": with a plain Python bool/int condition, pick the value in Python;
    # with a SQL expression condition, build a SQL CASE instead.
    if isinstance(cond, (bool, int)):
        return if_true if cond else if_false
    else:
        return sqle.case([(cond, if_true)], else_=if_false)

def hcasel(cond, if_true, if_false):
    # Lazy variant: if_true/if_false are callables, so the unused branch is never
    # evaluated when the condition is a plain Python value.
    if isinstance(cond, (bool, int)):
        return if_true() if cond else if_false()
    else:
        return sqle.case([(cond, if_true())], else_=if_false())
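
A quick usage sketch for the two helpers above. The column name and the printed SQL are illustrative only; it assumes sqlalchemy.sql.expression is imported as sqle (as the helpers expect) and SQLAlchemy 1.3/1.4, where the list form of case() is accepted.

from sqlalchemy.sql import column

# Plain Python condition: resolved in Python, no SQL involved.
print(hcase(True, 'yes', 'no'))                       # -> 'yes'
print(hcasel(False, lambda: 1 / 0, lambda: 'safe'))   # -> 'safe'; the 1/0 branch is never called

# SQL expression condition: a CASE expression is built instead.
x = column('x')
print(hcase(x > 0, 'pos', 'neg'))
# roughly: CASE WHEN (x > :x_1) THEN :param_1 ELSE :param_2 END
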
async def create_conversion_batch(entity_name, entity_id, format, user_id):
    entity_name = entity_name.upper()
    if entity_name == 'AUTHOR':
        author = model.Author.__table__
        q = select([case([(author.c.first_name == None, author.c.last_name)],
                         else_=author.c.first_name + ' ' + author.c.last_name)])\
            .where(author.c.id == entity_id)
    elif entity_name == 'SERIES':
        series = model.Series.__table__
        q = select([series.c.title]).where(series.c.id == entity_id)
    elif entity_name == 'BOOKSHELF':
        shelf = model.Bookshelf.__table__
        q = select([shelf.c.name]).where(shelf.c.id == entity_id)
    else:
        raise ValueError('Invalid entity name')
    format_id = await get_format_id(format)
    async with engine.acquire() as conn:
        batch = model.ConversionBatch.__table__
        res = await conn.execute(q)
        name = await res.scalar()
        name = "Books for %s %s" % (entity_name.lower(), name)
        res = await conn.execute(batch.insert()
                                 .values(name=name, for_entity=entity_name,
                                         entity_id=entity_id, format_id=format_id,
                                         created_by_id=user_id,
                                         modified_by_id=user_id, version_id=1)
                                 .returning(batch.c.id))
        return await res.scalar()
def update_replica_lock_counter(rse, scope, name, value, rse_id=None, session=None):
    """
    Update file replica lock counters.

    :param rse: the rse name.
    :param scope: the scope name.
    :param name: the data identifier name.
    :param value: the number of created/deleted locks.
    :param rse_id: the id of the RSE.
    :param session: the database session in use.
    :returns: True or False.
    """
    if not rse_id:
        rse_id = get_rse_id(rse=rse, session=session)

    # Bug in the mysql driver: inside the CASE expression, lock_cnt already holds the
    # updated value, so the condition differs from the one used for other dialects.
    if session.bind.dialect.name == 'mysql':
        rowcount = session.query(models.RSEFileAssociation).\
            filter_by(rse_id=rse_id, scope=scope, name=name).\
            update({'lock_cnt': models.RSEFileAssociation.lock_cnt + value,
                    'tombstone': case([(models.RSEFileAssociation.lock_cnt + value < 0,
                                        datetime.utcnow()), ],
                                      else_=None)},
                   synchronize_session=False)
    else:
        rowcount = session.query(models.RSEFileAssociation).\
            filter_by(rse_id=rse_id, scope=scope, name=name).\
            update({'lock_cnt': models.RSEFileAssociation.lock_cnt + value,
                    'tombstone': case([(models.RSEFileAssociation.lock_cnt + value == 0,
                                        datetime.utcnow()), ],
                                      else_=None)},
                   synchronize_session=False)
    return bool(rowcount)
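
For reference, a standalone sketch of the conditional-tombstone UPDATE that the non-MySQL branch builds. The lightweight table here is hypothetical, the statement is only compiled and printed (not executed), and the list form of case() assumes SQLAlchemy 1.3/1.4.

from datetime import datetime

import sqlalchemy as sa
from sqlalchemy import case
from sqlalchemy.sql import column, table

replicas = table('replicas',
                 column('lock_cnt', sa.Integer),
                 column('tombstone', sa.DateTime))
value = -1
stmt = replicas.update().values(
    lock_cnt=replicas.c.lock_cnt + value,
    # tombstone is set only when the counter drops to zero, otherwise cleared to NULL
    tombstone=case([(replicas.c.lock_cnt + value == 0, datetime.utcnow())],
                   else_=None))
print(stmt)
# roughly: UPDATE replicas SET lock_cnt=(replicas.lock_cnt + :lock_cnt_1),
#          tombstone=CASE WHEN ((replicas.lock_cnt + :lock_cnt_2) = :param_1) THEN :param_2 END
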
def list_expired_temporary_dids(rse, limit, worker_number=None, total_workers=None,
                                session=None):
    """
    List expired temporary DIDs.

    :param rse: the rse name.
    :param limit: the maximum number of replicas returned.
    :param worker_number: id of the executing worker.
    :param total_workers: number of total workers.
    :param session: the database session in use.
    :returns: a list of dictionary replica.
    """
    rse_id = get_rse_id(rse, session=session)
    is_none = None
    query = session.query(models.TemporaryDataIdentifier.scope,
                          models.TemporaryDataIdentifier.name,
                          models.TemporaryDataIdentifier.path,
                          models.TemporaryDataIdentifier.bytes).\
        with_hint(models.TemporaryDataIdentifier, "INDEX(tmp_dids TMP_DIDS_EXPIRED_AT_IDX)", 'oracle').\
        filter(case([(models.TemporaryDataIdentifier.expired_at != is_none, models.TemporaryDataIdentifier.rse_id), ]) == rse_id)

    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]
            query = query.filter(text('ORA_HASH(name, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter(text('mod(md5(name), %s) = %s' % (total_workers - 1, worker_number - 1)))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter(text('mod(abs((\'x\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1)))

    return [{'path': path,
             'rse': rse,
             'rse_id': rse_id,
             'scope': scope,
             'name': name,
             'bytes': bytes}
            for scope, name, path, bytes in query.limit(limit)]
def band_code(cls):  # @NoSelf
    '''Returns the sql expression returning the first letter of the channel field,
    or NULL if the latter does not have length 3.'''
    # Return an sql expression matching the first char, or None if not a three-letter channel:
    return substr(cls.channel, 1, 1)
    # return case([(func.length(cls.channel) == 3, func.substr(cls.channel, 1, 1))], else_=null())

def instrument_code(cls):  # @NoSelf
    '''Returns the sql expression returning the second letter of the channel field,
    or NULL if the latter does not have length 3.'''
    # Return an sql expression matching the second char, or None if not a three-letter channel:
    return substr(cls.channel, 2, 1)
    # return case([(func.length(cls.channel) == 3, func.substr(cls.channel, 2, 1))], else_=null())

def orientation_code(cls):  # @NoSelf
    '''Returns the sql expression returning the third letter of the channel field,
    or NULL if the latter does not have length 3.'''
    # Return an sql expression matching the third char, or None if not a three-letter channel:
    return substr(cls.channel, 3, 1)
    # return case([(func.length(cls.channel) == 3, func.substr(cls.channel, 3, 1))], else_=null())
def query4dreport(session, **binexprs2count):
    '''Returns a query yielding the segment ids for the visualization in the GUI
    (download report).
    '''
    # We should get something along the lines of:
    # SELECT data_centers.id AS data_centers_id,
    #        stations.id AS stations_id,
    #        count(segments.id) as csi,
    #        count(segments.sample_rate != channels.sample_rate) as segid
    # FROM data_centers
    # JOIN stations ON data_centers.id = stations.datacenter_id
    # JOIN channels ON channels.station_id = stations.id
    # JOIN segments ON segments.channel_id = channels.id
    # GROUP BY stations.id

    def countif(key, binexpr):
        NULL = literal_column("NULL")
        return func.count(case([(binexpr, Segment.id)], else_=NULL)).label(key)

    qry = session.query(DataCenter.id.label('dc_id'),  # @UndefinedVariable
                        Station.id.label('station_id'),
                        Station.latitude.label('lat'),
                        Station.longitude.label('lon'),
                        func.count(Segment.id).label('num_segments'),
                        *[countif(k, v) for k, v in binexprs2count.items()])

    # Back-referenced relationships seem to be instantiated only after the first query is made:
    # https://stackoverflow.com/questions/14921777/backref-class-attribute
    # Workaround:
    configure_mappers()

    return qry.join(DataCenter.stations,  # @UndefinedVariable
                    Station.channels,  # @UndefinedVariable
                    Channel.segments,  # @UndefinedVariable
                    ).group_by(DataCenter.id, Station.id)
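
The countif helper above leans on the fact that COUNT() skips NULLs: a CASE with no matching branch (and a NULL else_) contributes nothing to the count. A tiny self-contained check of that behaviour, using a hypothetical table, an in-memory SQLite database and SQLAlchemy 1.3/1.4 list-form case():

import sqlalchemy as sa
from sqlalchemy import case, func, literal_column

engine = sa.create_engine('sqlite://')
seg = sa.Table('seg', sa.MetaData(),
               sa.Column('id', sa.Integer, primary_key=True),
               sa.Column('ok', sa.Boolean))
seg.create(engine)

with engine.begin() as conn:
    conn.execute(seg.insert(), [{'id': 1, 'ok': True}, {'id': 2, 'ok': False}])
    total, matching = conn.execute(
        sa.select([func.count(seg.c.id),
                   func.count(case([(seg.c.ok, seg.c.id)], else_=literal_column("NULL")))])
    ).first()
    print(total, matching)  # 2 1  -- only the row satisfying the condition is counted
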
def best_lang_old(self, locale_codes):
    # Based on a simple ordered list of locale_codes.
    locale_collection = Locale.locale_collection
    locale_collection_subsets = Locale.locale_collection_subsets
    available = self.entries_as_dict
    if len(available) == 0:
        return LangStringEntry.EMPTY()
    if len(available) == 1:
        # optimize for common case
        return available[0]
    for locale_code in locale_codes:
        # is the locale there?
        if locale_code in available:
            return available[locale_code]
        # is the base locale there?
        root_locale = Locale.extract_root_locale(locale_code)
        if root_locale not in locale_codes:
            locale_id = locale_collection.get(root_locale, None)
            if locale_id and locale_id in available:
                return available[locale_id]
        # is another variant there?
        mt_variants = list()
        for sublocale in locale_collection_subsets[root_locale]:
            if sublocale in locale_codes:
                continue
            if sublocale == root_locale:
                continue
            if Locale.locale_is_machine_translated(sublocale):
                mt_variants.append(sublocale)
                continue
            locale_id = locale_collection.get(sublocale, None)
            if locale_id and locale_id in available:
                return available[locale_id]
    # We found nothing, look at MT variants.
    for sublocale in mt_variants:
        locale_id = locale_collection.get(sublocale, None)
        if locale_id and locale_id in available:
            return available[locale_id]
    # TODO: Look at other languages in the country?
    # Give up and give nothing, or give first?
def list_unlocked_replicas(rse, limit, bytes=None, rse_id=None, worker_number=None, total_workers=None, delay_seconds=0, session=None):
    """
    List RSE file replicas with no locks.

    :param rse: the rse name.
    :param bytes: the amount of needed bytes.
    :param session: the database session in use.
    :returns: a list of dictionary replica.
    """
    if not rse_id:
        rse_id = get_rse_id(rse=rse, session=session)

    none_value = None  # Hack to get pep8 happy...
    # filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED).\
    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\
        with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\
        filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
        filter(models.RSEFileAssociation.lock_cnt == 0).\
        filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\
        filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)),
                   and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\
        order_by(models.RSEFileAssociation.tombstone)

    # do not delete files used as sources
    stmt = exists(select([1]).prefix_with("/*+ INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX) */", dialect='oracle')).\
        where(and_(models.RSEFileAssociation.scope == models.Request.scope,
                   models.RSEFileAssociation.name == models.Request.name))
    query = query.filter(not_(stmt))

    if worker_number and total_workers and total_workers - 1 > 0:
        if session.bind.dialect.name == 'oracle':
            bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]
            query = query.filter(text('ORA_HASH(name, :total_workers) = :worker_number', bindparams=bindparams))
        elif session.bind.dialect.name == 'mysql':
            query = query.filter(text('mod(md5(name), %s) = %s' % (total_workers - 1, worker_number - 1)))
        elif session.bind.dialect.name == 'postgresql':
            query = query.filter(text('mod(abs((\'x\'||md5(name))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1)))

    needed_space = bytes
    total_bytes, total_files = 0, 0
    rows = []
    for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000):
        if state != ReplicaState.UNAVAILABLE:
            total_bytes += bytes
            if tombstone != OBSOLETE and needed_space is not None and total_bytes > needed_space:
                break
            total_files += 1
            if total_files > limit:
                break
        rows.append({'scope': scope, 'name': name, 'path': path,
                     'bytes': bytes, 'tombstone': tombstone,
                     'state': state})
    return rows
def touch_replica(replica, session=None):
    """
    Update the accessed_at timestamp of the given file replica/did but don't wait if the row is locked.

    :param replica: a dictionary with the information of the affected replica.
    :param session: the database session in use.
    :returns: True, if successful, False otherwise.
    """
    if 'rse_id' not in replica:
        replica['rse_id'] = get_rse_id(rse=replica['rse'], session=session)

    try:
        accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None

        session.query(models.RSEFileAssociation).\
            filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            with_for_update(nowait=True).one()

        session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            update({'accessed_at': accessed_at,
                    'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value,
                                             models.RSEFileAssociation.tombstone != OBSOLETE),
                                        accessed_at)],
                                      else_=models.RSEFileAssociation.tombstone)},
                   synchronize_session=False)

        session.query(models.DataIdentifier).\
            filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            with_for_update(nowait=True).one()

        session.query(models.DataIdentifier).\
            filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
            update({'accessed_at': accessed_at}, synchronize_session=False)

    except DatabaseError:
        return False
    except NoResultFound:
        return True

    return True
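
The lock-avoidance above, pulled out into a generic, hedged helper: SELECT ... FOR UPDATE NOWAIT fails immediately with a DatabaseError when another transaction holds the row, so the caller can skip instead of blocking. The model, primary-key kwargs and accessed_at column below are hypothetical, and NOWAIT needs a backend such as PostgreSQL or Oracle (SQLite ignores FOR UPDATE).

from datetime import datetime

from sqlalchemy.exc import DatabaseError
from sqlalchemy.orm.exc import NoResultFound

def touch_if_unlocked(session, model, **pk):
    """Set model.accessed_at to now, but give up at once if the row is locked."""
    try:
        # Fails fast (DatabaseError) instead of waiting on a concurrent lock.
        session.query(model).filter_by(**pk).with_for_update(nowait=True).one()
    except DatabaseError:
        return False           # row locked by someone else: skip it
    except NoResultFound:
        return True            # nothing to touch
    session.query(model).filter_by(**pk).update(
        {'accessed_at': datetime.utcnow()}, synchronize_session=False)
    return True
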
def mark_unlocked_replicas(rse, bytes, session=None):
    """
    Mark unlocked replicas as obsolete to release space quickly.

    :param rse: the rse name.
    :param bytes: the amount of needed bytes.
    :param session: the database session in use.
    :returns: The list of marked replicas.
    """
    rse_id = get_rse_id(rse=rse, session=session)
    none_value = None  # Hack to get pep8 happy...
    # query = session.query(func.count(), func.sum(models.RSEFileAssociation.bytes)).\
    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes).\
        with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\
        filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
        filter(models.RSEFileAssociation.lock_cnt == 0).\
        filter(models.RSEFileAssociation.tombstone != OBSOLETE).\
        filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\
        filter(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD))).\
        order_by(models.RSEFileAssociation.bytes.desc())

    rows = []
    needed_space, total_bytes = bytes, 0
    for (scope, name, bytes) in query.yield_per(1000):
        if total_bytes > needed_space:
            break

        rowcount = session.query(models.RSEFileAssociation).\
            filter_by(rse_id=rse_id, scope=scope, name=name).\
            with_hint(models.RSEFileAssociation,
                      "index(REPLICAS REPLICAS_PK)",
                      'oracle').\
            filter(models.RSEFileAssociation.tombstone != none_value).\
            update({'tombstone': OBSOLETE}, synchronize_session=False)

        if rowcount:
            total_bytes += bytes
            rows.append({'scope': scope, 'name': name})
    return rows
def _do_create_from_json(
        cls, json, parse_def, context,
        duplicate_handling=None, object_importer=None):
    # Special case for JSON-LD
    added = False
    ls = cls()

    def guess_lang(value):
        from .discussion import Discussion
        discussion = context.get_instance_of_class(Discussion)
        if discussion:
            tr_service = discussion.translation_service()
            lang, _ = tr_service.identify(value)
            return lang
        return LocaleLabel.UNDEFINED

    if isinstance(json, list):
        for entry_record in json:
            value = entry_record['@value']
            if value:
                added = True
                lang = entry_record.get('@language', None) or guess_lang(value)
                ls.add_value(value, lang)
    elif isinstance(json, dict):
        if '@id' in json or '@type' in json:
            return super(LangString, cls)._do_create_from_json(
                json, parse_def, context,
                duplicate_handling, object_importer)
        elif '@value' in json:
            value = json['@value']
            if value:
                added = True
                lang = json.get('@language', None) or guess_lang(value)
                ls.add_value(value, lang)
        else:
            for lang, value in json.items():
                if value:
                    added = True
                    ls.add_value(value, lang)
    elif isinstance(json, string_types):
        if json:
            added = True
            lang = guess_lang(json)
            ls.add_value(json, lang)
    else:
        raise ValueError("Not a valid langstring: " + json)
    i_context = ls.get_instance_context(context)
    if added:
        cls.default_db.add(ls)
    else:
        i_context._instance = None
    return i_context
def _do_update_from_json(
        self, json, parse_def, context,
        duplicate_handling=None, object_importer=None):
    # Special case for JSON-LD
    if isinstance(json, list):
        for entry_record in json:
            lang = entry_record.get('@language', LocaleLabel.UNDEFINED)
            value = entry_record['@value']
            entry = self.entries_as_dict.get(lang, None)
            if entry:
                entry.set_value(value)
            elif value:
                self.add_value(value, lang)
    elif isinstance(json, dict):
        if '@id' in json or '@type' in json:
            return super(LangString, self)._do_update_from_json(
                json, parse_def, context,
                duplicate_handling, object_importer)
        elif '@value' in json:
            value = json['@value']
            if value:
                lang = json.get('@language', LocaleLabel.UNDEFINED)
                entry = self.entries_as_dict.get(lang, None)
                if entry:
                    entry.set_value(value)
                elif value:
                    self.add_value(value, lang)
        else:
            for lang, value in json.items():
                entry = self.entries_as_dict.get(lang, None)
                if entry:
                    entry.set_value(value)
                elif value:
                    self.add_value(value, lang)
    elif isinstance(json, string_types):
        from .discussion import Discussion
        lang = LocaleLabel.UNDEFINED
        discussion = context.get_instance_of_class(Discussion)
        if discussion:
            tr_service = discussion.translation_service()
            lang, _ = tr_service.identify(json)
        entry = self.entries_as_dict.get(lang, None)
        if entry:
            entry.set_value(json)
        elif json:
            self.add_value(json, lang)
    else:
        raise ValueError("Not a valid langstring: " + json)
    return self

# TODO: Reinstate when the javascript can handle empty body/subject.
# def generic_json(
#         self, view_def_name='default', user_id=None,
#         permissions=(P_READ, ), base_uri='local:'):
#     if self.id == self.EMPTY_ID:
#         return None
#     return super(LangString, self).generic_json(
#         view_def_name=view_def_name, user_id=user_id,
#         permissions=permissions, base_uri=base_uri)
def best_lang_old(self, locale_codes):
    # Construct an expression that will find the best locale according to the list.
    scores = {}
    current_score = 1
    locale_collection = Locale.locale_collection
    locale_collection_subsets = Locale.locale_collection_subsets
    for locale_code in locale_codes:
        # is the locale there?
        locale_id = locale_collection.get(locale_code, None)
        if locale_id:
            scores[locale_id] = current_score
            current_score += 1
        # is the base locale there?
        root_locale = Locale.extract_root_locale(locale_code)
        if root_locale not in locale_codes:
            locale_id = locale_collection.get(root_locale, None)
            if locale_id:
                scores[locale_id] = current_score
                current_score += 1
        # is another variant there?
        mt_variants = list()
        found = False
        for sublocale in locale_collection_subsets[root_locale]:
            if sublocale in locale_codes:
                continue
            if sublocale == root_locale:
                continue
            if Locale.locale_is_machine_translated(sublocale):
                mt_variants.append(sublocale)
                continue
            locale_id = locale_collection.get(sublocale, None)
            if locale_id:
                scores[locale_id] = current_score
                found = True
        if found:
            current_score += 1
    # Put MT variants as a last resort.
    for sublocale in mt_variants:
        locale_id = locale_collection.get(sublocale, None)
        if locale_id:
            scores[locale_id] = current_score
            # Assume each MT variant to have a lower score.
            current_score += 1
    c = case(scores, value=LangStringEntry.locale_id,
             else_=current_score)
    q = Query(LangStringEntry).order_by(c).limit(1).subquery()
    return aliased(LangStringEntry, q)
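
A small sketch of the mapping form of case() used just above: passing a dict together with value= compares that column against the dict keys ("CASE col WHEN k THEN v ..."), which makes it a convenient ORDER BY ranking expression. The table and scores below are hypothetical; the dict/value form shown assumes SQLAlchemy 1.3/1.4.

import sqlalchemy as sa
from sqlalchemy import case
from sqlalchemy.sql import column, table

entry = table('langstring_entry', column('id'), column('locale_id'))
scores = {3: 1, 7: 2}          # locale_id -> preference rank (lower is better)

rank = case(scores, value=entry.c.locale_id, else_=99)
stmt = sa.select([entry.c.id]).order_by(rank).limit(1)
print(stmt)
# roughly: SELECT langstring_entry.id FROM langstring_entry
#          ORDER BY CASE langstring_entry.locale_id WHEN :param_1 THEN :param_2
#                   WHEN :param_3 THEN :param_4 ELSE :param_5 END
#          LIMIT :param_6
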
def identify_locale(self, locale_code, data, certainty=False):
    # A translation service proposes a data identification.
    # The information is deemed confirmed if it fits the initial
    # hypothesis given at LSE creation.
    changed = False
    old_locale_code = self.locale
    langstring = self.langstring or (
        LangString.get(self.langstring_id) if self.langstring_id else None)
    if self.is_machine_translated:
        raise RuntimeError("Why identify a machine-translated locale?")
    data = data or {}
    original = self.locale_identification_data_json.get("original", None)
    if not locale_code or locale_code == LocaleLabel.UNDEFINED:
        if not self.locale or self.locale == LocaleLabel.UNDEFINED:
            # replace id data with new one.
            if original:
                data['original'] = original
            self.locale_identification_data_json = data
        return False
    elif original and locale_code == original:
        if locale_code != old_locale_code:
            self.locale = locale_code
            changed = True
        self.locale_identification_data_json = data
        self.locale_confirmed = True
    elif locale_code != old_locale_code:
        if self.locale_confirmed:
            if certainty:
                raise RuntimeError("Conflict of certainty")
            # keep the old confirming data
            return False
        # compare data? replacing with new for now.
        if not original and self.locale_identification_data:
            original = LocaleLabel.UNDEFINED
        original = original or old_locale_code
        if original != locale_code and original != LocaleLabel.UNDEFINED:
            data["original"] = original
        self.locale = locale_code
        changed = True
        self.locale_identification_data_json = data
        self.locale_confirmed = certainty
    else:
        if original and original != locale_code:
            data['original'] = original
        self.locale_identification_data_json = data
        self.locale_confirmed = certainty or locale_code == original
    if changed:
        if langstring:
            langstring.remove_translations_of(self)
            # Re-adding to verify there's no conflict
            added = langstring.add_entry(self, certainty)
            if added is None:
                # We identified an entry with something that existed
                # as a known original. Not sure what to do now,
                # reverting just in case.
                self.locale_code = old_locale_code
                changed = False
    return changed