from oslo_config import cfg


def load_pre_hourly_processor_options():
    app_opts = [
        cfg.IntOpt('late_metric_slack_time', default=600),
        cfg.StrOpt('data_provider',
                   default='monasca_transform.processor.'
                           'pre_hourly_processor:'
                           'PreHourlyProcessorDataProvider'),
        cfg.BoolOpt('enable_instance_usage_df_cache'),
        cfg.StrOpt('instance_usage_df_cache_storage_level'),
        cfg.BoolOpt('enable_batch_time_filtering'),
        cfg.IntOpt('effective_batch_revision', default=2)
    ]
    app_group = cfg.OptGroup(name='pre_hourly_processor',
                             title='pre_hourly_processor')
    cfg.CONF.register_group(app_group)
    cfg.CONF.register_opts(app_opts, group=app_group)
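Once registered, each option surfaces as an attribute of cfg.CONF under its group name. A minimal usage sketch (not part of the original module); note that a BoolOpt declared without a default, such as enable_batch_time_filtering above, resolves to None until a value is set:

load_pre_hourly_processor_options()
# 600 unless overridden in a [pre_hourly_processor] config section
slack = cfg.CONF.pre_hourly_processor.late_metric_slack_time
if cfg.CONF.pre_hourly_processor.enable_batch_time_filtering:
    print('batch time filtering is enabled')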
def load_database_options():
    db_opts = [
        cfg.StrOpt('server_type'),
        cfg.StrOpt('host'),
        cfg.StrOpt('database_name'),
        cfg.StrOpt('username'),
        cfg.StrOpt('password'),
        cfg.BoolOpt('use_ssl', default=False),
        cfg.StrOpt('ca_file')
    ]
    mysql_group = cfg.OptGroup(name='database', title='database')
    cfg.CONF.register_group(mysql_group)
    cfg.CONF.register_opts(db_opts, group=mysql_group)
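Values for these options normally arrive through a configuration file. A hypothetical fragment follows (the file name and values are illustrative); oslo.config coerces the usual truth strings (true/false, 1/0, yes/no, on/off, case-insensitive) into a real bool for use_ssl:

# monasca-transform.conf:
#     [database]
#     server_type = mysql
#     host = 127.0.0.1
#     use_ssl = true
load_database_options()
cfg.CONF(['--config-file', 'monasca-transform.conf'])
assert isinstance(cfg.CONF.database.use_ssl, bool)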
def load_service_options():
    service_opts = [
        cfg.StrOpt('coordinator_address'),
        cfg.StrOpt('coordinator_group'),
        cfg.FloatOpt('election_polling_frequency'),
        cfg.BoolOpt('enable_debug_log_entries', default=False),
        cfg.StrOpt('setup_file'),
        cfg.StrOpt('setup_target'),
        cfg.StrOpt('spark_driver'),
        cfg.StrOpt('service_log_path'),
        cfg.StrOpt('service_log_filename',
                   default='monasca-transform.log'),
        cfg.StrOpt('spark_event_logging_dest'),
        cfg.StrOpt('spark_event_logging_enabled'),
        cfg.StrOpt('spark_jars_list'),
        cfg.StrOpt('spark_master_list'),
        cfg.StrOpt('spark_python_files'),
        cfg.IntOpt('stream_interval'),
        cfg.StrOpt('work_dir'),
        cfg.StrOpt('spark_home'),
        cfg.BoolOpt('enable_record_store_df_cache'),
        cfg.StrOpt('record_store_df_cache_storage_level')
    ]
    service_group = cfg.OptGroup(name='service', title='service')
    cfg.CONF.register_group(service_group)
    cfg.CONF.register_opts(service_opts, group=service_group)
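Note that a BoolOpt default should be a Python bool rather than a string: any non-empty string, including 'false', is truthy, which can mislead code that inspects the Opt object directly. A small standalone sketch of the pitfall (not from the module above):

opt = cfg.BoolOpt('enable_debug_log_entries', default='false')
assert bool(opt.default) is True   # the raw string default is truthy
opt = cfg.BoolOpt('enable_debug_log_entries', default=False)
assert opt.default is False        # a real bool behaves as expected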
def load_stage_processors_options():
    app_opts = [
        cfg.BoolOpt('pre_hourly_processor_enabled'),
    ]
    app_group = cfg.OptGroup(name='stage_processors',
                             title='stage_processors')
    cfg.CONF.register_group(app_group)
    cfg.CONF.register_opts(app_opts, group=app_group)
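These loaders are meant to run together before the configuration is parsed. A hypothetical bootstrap (the wrapper name and argument handling are illustrative, not from the source above):

import sys

def load_all_options():
    load_database_options()
    load_pre_hourly_processor_options()
    load_service_options()
    load_stage_processors_options()

load_all_options()
cfg.CONF(sys.argv[1:], project='monasca_transform')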
def logger_conf(logger_name):
    return [
        cfg.StrOpt('output_format',
                   default="%(asctime)s - %(levelname)s - %(message)s"),
        cfg.BoolOpt('store', default=True),
        cfg.StrOpt('logging_level', default='debug'),
        cfg.StrOpt('logging_dir', default='/var/log/valet/'),
        cfg.StrOpt('logger_name', default=logger_name + ".log"),
        cfg.IntOpt('max_main_log_size', default=5000000),
        cfg.IntOpt('max_log_size', default=1000000),
        cfg.IntOpt('max_num_of_logs', default=3),
    ]
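Unlike the loaders above, logger_conf() only builds the option list and leaves registration to the caller. A minimal sketch, assuming the caller derives the group name from the logger name (the 'music' logger is hypothetical):

name = 'music'
logger_group = cfg.OptGroup(name=name + '_logger', title=name)
cfg.CONF.register_group(logger_group)
cfg.CONF.register_opts(logger_conf(name), group=logger_group)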
import copy

import six

from gnocchi import archive_policy
from gnocchi import incoming
from gnocchi import indexer
from gnocchi import service
from gnocchi import storage

# LOG and _SACK_NUMBER_OPT are module-level names in the original gnocchi
# source; _SACK_NUMBER_OPT is presumably an IntOpt named "sacks-number",
# given the conf.sacks_number access below.


def upgrade():
    conf = cfg.ConfigOpts()
    sack_number_opt = copy.copy(_SACK_NUMBER_OPT)
    sack_number_opt.default = 128
    conf.register_cli_opts([
        cfg.BoolOpt("skip-index", default=False,
                    help="Skip index upgrade."),
        cfg.BoolOpt("skip-storage", default=False,
                    help="Skip storage upgrade."),
        cfg.BoolOpt("skip-incoming", default=False,
                    help="Skip incoming storage upgrade."),
        cfg.BoolOpt("skip-archive-policies-creation", default=False,
                    help="Skip default archive policies creation."),
        sack_number_opt,
    ])
    conf = service.prepare_service(conf=conf, log_to_std=True)
    if not conf.skip_index:
        index = indexer.get_driver(conf)
        LOG.info("Upgrading indexer %s", index)
        index.upgrade()
    if not conf.skip_storage:
        # FIXME(jd) Pass None as coordinator because it's not needed in this
        # case. This will be removed when the storage will stop requiring a
        # coordinator object.
        s = storage.get_driver(conf, None)
        LOG.info("Upgrading storage %s", s)
        s.upgrade()
    if not conf.skip_incoming:
        i = incoming.get_driver(conf)
        LOG.info("Upgrading incoming storage %s", i)
        i.upgrade(conf.sacks_number)
    if (not conf.skip_archive_policies_creation
            and not index.list_archive_policies()
            and not index.list_archive_policy_rules()):
        if conf.skip_index:
            index = indexer.get_driver(conf)
        for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES):
            index.create_archive_policy(ap)
        index.create_archive_policy_rule("default", "*", "low")
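Each BoolOpt registered with register_cli_opts() becomes a bare command-line flag that sets the option to True (oslo.config also generates a negated --no<name> variant). A standalone sketch of the same pattern with a hypothetical option:

conf = cfg.ConfigOpts()
conf.register_cli_opts([cfg.BoolOpt('dry-run', default=False,
                                    help='Parse options but write nothing.')])
conf(['--dry-run'])
assert conf.dry_run is True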