def do_translate(message, translation_function):
    """
    Translate *message* via the named *translation_function* (``gettext`` or
    ``ugettext``) on the thread-local translation object, falling back to the
    default translation when no translation is currently activated.
    """
    global _default
    # str() is allowing a bytestring message to remain bytestring on Python 2
    normalized = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
    if not normalized:
        # An empty message yields an empty value of the same type; plain
        # gettext would return the catalog metadata instead.
        translated = type(message)("")
    else:
        if not _default:
            _default = translation(settings.LANGUAGE_CODE)
        active_translation = getattr(_active, "value", _default)
        translated = getattr(active_translation, translation_function)(normalized)
    return mark_safe(translated) if isinstance(message, SafeData) else translated
# Example usages of Python's find() method (collected snippets).
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
def do_translate(message, translation_function):
    """
    Translate *message* via the named *translation_function* (``gettext`` or
    ``ugettext``) on the thread-local translation object, falling back to the
    default translation when no translation is currently activated.
    """
    global _default
    # str() is allowing a bytestring message to remain bytestring on Python 2
    normalized = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
    if not normalized:
        # An empty message yields an empty value of the same type; plain
        # gettext would return the catalog metadata instead.
        translated = type(message)("")
    else:
        if not _default:
            _default = translation(settings.LANGUAGE_CODE)
        active_translation = getattr(_active, "value", _default)
        translated = getattr(active_translation, translation_function)(normalized)
    return mark_safe(translated) if isinstance(message, SafeData) else translated
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies or
    session.

    :param lang_code: a language code such as ``'en-us'``
    :return: True when a compiled 'django' catalog exists for the locale.
    """
    from google.appengine._internal.django.conf import settings
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    # gettext_module.find() returns the catalog path or None, so the
    # redundant ``if ...: return True / else: return False`` collapses
    # into a single boolean expression.
    return gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
def do_translate(message, translation_function):
    """
    Translate *message* via the named *translation_function* (``gettext`` or
    ``ugettext``) on the thread-local translation object, falling back to the
    default translation when no translation is currently activated.
    """
    global _default
    # str() is allowing a bytestring message to remain bytestring on Python 2
    normalized = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
    if not normalized:
        # An empty message yields an empty value of the same type; plain
        # gettext would return the catalog metadata instead.
        translated = type(message)("")
    else:
        if not _default:
            _default = translation(settings.LANGUAGE_CODE)
        active_translation = getattr(_active, "value", _default)
        translated = getattr(active_translation, translation_function)(normalized)
    return mark_safe(translated) if isinstance(message, SafeData) else translated
def has_option_desktop(self, key):
    """Return True when *key* is available, either in the static data
    mapping or as an element in the appinfo XML tree."""
    if key in self.STATIC_DATA:
        return True
    key = self._apply_mapping(key)
    # PEP 8: ``x is not None`` is the idiomatic form of ``not x is None``.
    return self.appinfo_xml.find(key) is not None
def desktopf(self):
    """Return the text content of the appinfo ``<id>`` element."""
    return self.appinfo_xml.find("id").text
def has_option_desktop(self, key):
    """Return True when *key* is available, either in the static data
    mapping or as an element in the appinfo XML tree."""
    if key in self.STATIC_DATA:
        return True
    key = self._apply_mapping(key)
    # PEP 8: ``x is not None`` is the idiomatic form of ``not x is None``.
    return self.appinfo_xml.find(key) is not None
def desktopf(self):
    """Return the text content of the appinfo ``<id>`` element."""
    return self.appinfo_xml.find("id").text
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :return: list of available language codes; 'en_US' is always present
        and first.
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()

    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    def find(language):
        # Return the compiled catalog path for `language`, or None.
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[language])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
def do_translate(message, translation_function):
    """
    Translate *message* via the named *translation_function* (``gettext`` or
    ``ugettext``) on the thread-local translation object, falling back to the
    default translation when no translation is currently activated.
    """
    global _default
    # str() is allowing a bytestring message to remain bytestring on Python 2
    normalized = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
    if not normalized:
        # An empty message yields an empty value of the same type; plain
        # gettext would return the catalog metadata instead.
        translated = type(message)("")
    else:
        if not _default:
            _default = translation(settings.LANGUAGE_CODE)
        active_translation = getattr(_active, "value", _default)
        translated = getattr(active_translation, translation_function)(normalized)
    return mark_safe(translated) if isinstance(message, SafeData) else translated
def get_language_from_request(request, check_path=False):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    If check_path is True, the URL path prefix will be checked for a language
    code, otherwise this is skipped for backwards compatibility.
    """
    # Resolution order: URL path (optional) -> session -> cookie ->
    # Accept-Language header -> settings.LANGUAGE_CODE. The first source
    # that yields a usable language wins.
    if check_path:
        lang_code = get_language_from_path(request.path_info)
        if lang_code is not None:
            return lang_code
    supported_lang_codes = get_languages()
    # Session value is honoured only when it is both listed as supported
    # and backed by an actual catalog (check_for_language).
    if hasattr(request, 'session'):
        lang_code = request.session.get(LANGUAGE_SESSION_KEY)
        if lang_code in supported_lang_codes and lang_code is not None and check_for_language(lang_code):
            return lang_code
    # Cookie value may be a sublanguage; get_supported_language_variant
    # maps it onto a supported variant or raises LookupError.
    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    try:
        return get_supported_language_variant(lang_code)
    except LookupError:
        pass
    # Walk the Accept-Language header in client preference order; '*'
    # means "anything", so stop scanning and fall through to the default.
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        if not language_code_re.search(accept_lang):
            continue
        try:
            return get_supported_language_variant(accept_lang)
        except LookupError:
            continue
    # Last resort: the site-wide default, returned verbatim even when it
    # is not itself a supported variant.
    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        return settings.LANGUAGE_CODE
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    :return: list of available language codes; 'en_US' is always present
        and first.
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()

    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    def find(language):
        # Return the compiled catalog path for `language`, or None.
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[language])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    # https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (loc, alias) in six.iteritems(aliases):
        if loc in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=False,
                              charset='utf-8', force_all=False):
    """
    :param list locale_dirs:
        list of path as `['locale_dir1', 'locale_dir2', ...]` to find
        translation catalogs. Each path contains a structure such as
        `<locale>/LC_MESSAGES/domain.po`.
    :param str locale: a language as `'en'`
    :param list domains: list of domain names to get. If empty list or None
        is specified, get all domain names. default is None.
    :param boolean gettext_compact:
        * False: keep domains directory structure (default).
        * True: domains in the sub directory will be merged into 1 file.
    :param boolean force_all:
        Set True if you want to get all catalogs rather than updated catalogs.
        default is False.
    :return: a set of CatalogInfo objects (note: an empty *list* is returned
        when no locale is given; otherwise the result is a set, so iteration
        order is unspecified).
    """
    if not locale:
        return []  # locale is not specified
    catalogs = set()
    for locale_dir in locale_dirs:
        if not locale_dir:
            continue  # skip system locale directory
        base_dir = path.join(locale_dir, locale, 'LC_MESSAGES')
        if not path.exists(base_dir):
            continue  # locale path is not found
        for dirpath, dirnames, filenames in walk(base_dir, followlinks=True):
            filenames = [f for f in filenames if f.endswith('.po')]
            for filename in filenames:
                base = path.splitext(filename)[0]
                # Domain = the .po path relative to LC_MESSAGES
                # (e.g. 'sub/dir/name' for nested catalogs).
                domain = path.relpath(path.join(dirpath, base), base_dir)
                if gettext_compact and path.sep in domain:
                    # Compact mode: collapse nested catalogs onto the first
                    # path component so they merge into one file.
                    domain = path.split(domain)[0]
                domain = domain.replace(path.sep, SEP)
                if domains and domain not in domains:
                    continue
                cat = CatalogInfo(base_dir, domain, charset)
                if force_all or cat.is_outdated():
                    catalogs.add(cat)
    return catalogs
def _load_unicode_data(self):
    '''Loads emoji names from UnicodeData.txt

    For every usable code point, registers its lowercased Unicode name
    and its general-category information in the emoji dictionary under
    the ('en') language key.
    '''
    dirnames = (USER_DATADIR, DATADIR,
                # On Fedora, the “unicode-ucd” package has the
                # UnicodeData.txt file here:
                '/usr/share/unicode/ucd')
    basenames = ('UnicodeData.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        # Best effort: missing data file is reported but not fatal.
        sys.stderr.write(
            '_load_unicode_data(): could not find "%s" in "%s"\n'
            %(basenames, dirnames))
        return
    with open_function(path, mode='rt') as unicode_data_file:
        for line in unicode_data_file.readlines():
            if not line.strip():
                continue
            # UnicodeData.txt is ';'-separated; only the first three
            # fields (code point, name, general category) are used.
            codepoint_string, name, category = line.split(';')[:3]
            codepoint_integer = int(codepoint_string, 16)
            emoji_string = chr(codepoint_integer)
            if category in ('Cc', 'Co', 'Cs'):
                # Never load control characters (“Cc”), they cause
                # too much problems when trying to display
                # them. Never load the “First” and “Last”
                # characters of private use characters “Co” and
                # surrogates (“Cs”) either as these are completely
                # useless.
                continue
            # Unless all of UnicodeData is requested, skip characters
            # whose category is not marked valid and which are not
            # explicitly whitelisted in VALID_CHARACTERS.
            if (not self._unicode_data_all
                    and not UNICODE_CATEGORIES[category]['valid']
                    and emoji_string not in VALID_CHARACTERS):
                continue
            self._add_to_emoji_dict(
                (emoji_string, 'en'), 'names', [name.lower()])
            self._add_to_emoji_dict(
                (emoji_string, 'en'),
                'ucategories', [
                    category,
                    UNICODE_CATEGORIES[category]['major'],
                    UNICODE_CATEGORIES[category]['minor'],
                ]
            )
def _load_unicode_emoji_data(self):
    '''
    Loads emoji property data from emoji-data.txt

    http://unicode.org/Public/emoji/5.0/emoji-data.txt

    For every code point in each listed range, records its emoji
    property and, when present in the trailing comment, the Unicode
    version that introduced it.
    '''
    dirnames = (USER_DATADIR, DATADIR)
    basenames = ('emoji-data.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        # Best effort: missing data file is reported but not fatal.
        sys.stderr.write(
            '_load_unicode_emoji_data(): could not find "%s" in "%s"\n'
            %(basenames, dirnames))
        return
    # The pattern is constant, so compile it once per file instead of
    # once per line (the original recompiled it on every iteration).
    version_pattern = re.compile(
        r'[^;]*;[^;]*#\s*(?P<uversion>[0-9]+\.[0-9]+)\s*'
        + r'\[[0-9]+\]')
    with open_function(path, mode='rt') as unicode_emoji_data_file:
        for line in unicode_emoji_data_file.readlines():
            unicode_version = ''
            match = version_pattern.match(line)
            if match and match.group('uversion'):
                unicode_version = match.group('uversion')
            # Strip the trailing '#' comment; skip lines that are empty
            # after stripping.
            line = re.sub(r'#.*$', '', line).strip()
            if not line:
                continue
            # `prop` rather than `property` to avoid shadowing the builtin.
            codepoint_string, prop = [
                x.strip() for x in line.split(';')[:2]]
            # A field is either a single code point or a 'A..B' range.
            codepoint_range = [
                int(x, 16) for x in codepoint_string.split('..')]
            if len(codepoint_range) == 1:
                codepoint_range.append(codepoint_range[0])
            assert len(codepoint_range) == 2
            for codepoint in range(
                    codepoint_range[0], codepoint_range[1] + 1):
                emoji_string = chr(codepoint)
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'properties', [prop])
                if unicode_version:
                    self._add_to_emoji_dict(
                        (emoji_string, 'en'), 'uversion', unicode_version)
def _load_unicode_emoji_sequences(self):
    '''
    Loads emoji sequence data from emoji-sequences.txt
    (docstring previously said emoji-data.txt, which was a copy-paste error)

    http://unicode.org/Public/emoji/5.0/emoji-sequences.txt

    For every listed sequence of code points, records its emoji
    property, its name, and, when present in the trailing comment, the
    Unicode version that introduced it.
    '''
    dirnames = (USER_DATADIR, DATADIR)
    basenames = ('emoji-sequences.txt',)
    (path, open_function) = _find_path_and_open_function(
        dirnames, basenames)
    if not path:
        # Best effort: missing data file is reported but not fatal.
        sys.stderr.write(
            '_load_unicode_emoji_sequences(): could not find "%s" in "%s"\n'
            %(basenames, dirnames))
        return
    # The pattern is constant, so compile it once per file instead of
    # once per line (the original recompiled it on every iteration).
    version_pattern = re.compile(
        r'[^;]*;[^;]*;[^;]*#\s*(?P<uversion>[0-9]+\.[0-9]+)\s*'
        + r'\[[0-9]+\]')
    with open_function(path, mode='rt') as unicode_emoji_sequences_file:
        for line in unicode_emoji_sequences_file.readlines():
            unicode_version = ''
            match = version_pattern.match(line)
            if match and match.group('uversion'):
                unicode_version = match.group('uversion')
            # Strip the trailing '#' comment; skip lines that are empty
            # after stripping.
            line = re.sub(r'#.*$', '', line).strip()
            if not line:
                continue
            # `prop` rather than `property` to avoid shadowing the builtin.
            codepoints, prop, name = [
                x.strip() for x in line.split(';')[:3]]
            # The '#' in the keycap sequence's name is eaten by the
            # comment-stripping above; restore it for this one entry.
            if codepoints == '0023 FE0F 20E3' and name == 'keycap:':
                name = 'keycap: #'
            emoji_string = ''
            for codepoint in codepoints.split(' '):
                emoji_string += chr(int(codepoint, 16))
            if emoji_string:
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'properties', [prop])
                self._add_to_emoji_dict(
                    (emoji_string, 'en'), 'names', [name.lower()])
                if unicode_version:
                    self._add_to_emoji_dict(
                        (emoji_string, 'en'), 'uversion', unicode_version)