def upgrade():
    op.add_column('pubmed', sa.Column('mesh_headings', JSONB))
Python JSONB class: example source code
File: 20160301131954_ictrp_create_table.py
Project: collectors
Author: opentrials
def upgrade():
    op.create_table('ictrp',
        # Meta
        sa.Column('meta_uuid', sa.Text),
        sa.Column('meta_source', sa.Text),
        sa.Column('meta_created', sa.DateTime(timezone=True)),
        sa.Column('meta_updated', sa.DateTime(timezone=True)),
        # Main
        sa.Column('register', sa.Text, primary_key=True),
        sa.Column('last_refreshed_on', sa.Date),
        sa.Column('main_id', sa.Text, primary_key=True),
        sa.Column('date_of_registration', sa.Text),
        sa.Column('primary_sponsor', sa.Text),
        sa.Column('public_title', sa.Text),
        sa.Column('scientific_title', sa.Text),
        sa.Column('date_of_first_enrollment', sa.Text),
        sa.Column('target_sample_size', sa.Integer),
        sa.Column('recruitment_status', sa.Text),
        sa.Column('url', sa.Text),
        sa.Column('study_type', sa.Text),
        sa.Column('study_design', sa.Text),
        sa.Column('study_phase', sa.Text),
        # Additional
        sa.Column('countries_of_recruitment', ARRAY(sa.Text)),
        sa.Column('contacts', JSONB),
        sa.Column('key_inclusion_exclusion_criteria', sa.Text),
        sa.Column('health_conditions_or_problems_studied', ARRAY(sa.Text)),
        sa.Column('interventions', ARRAY(sa.Text)),
        sa.Column('primary_outcomes', ARRAY(sa.Text)),
        sa.Column('secondary_outcomes', ARRAY(sa.Text)),
        sa.Column('secondary_ids', ARRAY(sa.Text)),
        sa.Column('sources_of_monetary_support', ARRAY(sa.Text)),
        sa.Column('secondary_sponsors', ARRAY(sa.Text)),
    )
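A create_table migration like this is normally paired with a downgrade that drops the table again; a minimal sketch, assuming nothing else depends on ictrp:

def downgrade():
    op.drop_table('ictrp')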
File: 20161007222818_create_cochrane_reviews_table.py
Project: collectors
Author: opentrials
def upgrade():
    op.create_table('cochrane_reviews',
        sa.Column('meta_id', sa.Text),
        sa.Column('meta_created', sa.DateTime(timezone=True), server_default=sa.text('now()')),
        sa.Column('meta_updated', sa.DateTime(timezone=True), server_default=sa.text('now()')),
        sa.Column('meta_source', sa.Text),
        sa.Column('id', UUID, primary_key=True),
        sa.Column('study_type', sa.Text),
        sa.Column('file_name', sa.Text),
        sa.Column('robs', JSONB),
        sa.Column('study_id', sa.Text),
        sa.Column('refs', JSONB),
        sa.Column('doi_id', sa.Text),
    )
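Once a column is stored as JSONB, it can be filtered with PostgreSQL's containment operator through SQLAlchemy. A minimal sketch follows; the table is redeclared with only the columns the query needs, and the filter key/value are purely illustrative:

import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB, UUID

metadata = sa.MetaData()
cochrane_reviews = sa.Table(
    'cochrane_reviews', metadata,
    sa.Column('id', UUID, primary_key=True),
    sa.Column('robs', JSONB),
)

# Rows whose robs document contains the given fragment; renders as "robs @> :robs_1".
query = sa.select([cochrane_reviews.c.id]).where(
    cochrane_reviews.c.robs.contains({'result': 'low'})
)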
def load_dialect_impl(self, dialect):
    if dialect.name == 'postgresql':
        if dialect.server_version_info >= (9, 4):
            self.using_native_json = True
            return dialect.type_descriptor(postgresql.JSONB())
        if dialect.server_version_info >= (9, 2):
            self.using_native_json = True
            return dialect.type_descriptor(postgresql.JSON())
    return dialect.type_descriptor(types.Text())
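A load_dialect_impl like the one above normally lives inside a TypeDecorator that falls back to serialized text on databases without native JSON support. A minimal sketch of such a wrapper, assuming the class name FallbackJSON and the json.dumps/json.loads fallback (neither is confirmed by the snippet itself):

import json

from sqlalchemy import types
from sqlalchemy.dialects import postgresql


class FallbackJSON(types.TypeDecorator):
    """Use JSONB/JSON on PostgreSQL >= 9.2, serialized text everywhere else."""

    impl = types.Text
    using_native_json = False

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            if dialect.server_version_info >= (9, 4):
                self.using_native_json = True
                return dialect.type_descriptor(postgresql.JSONB())
            if dialect.server_version_info >= (9, 2):
                self.using_native_json = True
                return dialect.type_descriptor(postgresql.JSON())
        return dialect.type_descriptor(types.Text())

    def process_bind_param(self, value, dialect):
        # Native JSON columns accept Python structures directly.
        if self.using_native_json or value is None:
            return value
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        if self.using_native_json or value is None:
            return value
        return json.loads(value)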
File: e4d4e95ae481_creating_base_schema.py
Project: actsys
Author: intel-ctrlsys
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('profile',
        sa.Column('profile_name', sa.String(length=128), nullable=False),
        sa.Column('properties', JSONB(), nullable=False),
        sa.PrimaryKeyConstraint('profile_name', name=op.f('profile_pkey'))
    )
    op.create_table('configuration',
        sa.Column('key', sa.String(length=128), nullable=False),
        sa.Column('value', sa.String(length=1024), nullable=False),
        sa.PrimaryKeyConstraint('key', name=op.f('configuration_pkey'))
    )
    op.create_table('device',
        sa.Column('device_id', sa.Integer(), nullable=False, autoincrement=True),
        sa.Column('device_type', sa.String(length=64), nullable=False),
        sa.Column('properties', JSONB(), nullable=True),
        sa.Column('hostname', sa.String(length=256), nullable=True),
        sa.Column('ip_address', sa.String(length=64), nullable=True),
        sa.Column('mac_address', sa.String(length=64), nullable=True),
        sa.Column('profile_name', sa.String(length=128), nullable=True),
        sa.Column('deleted', sa.BOOLEAN(), server_default=false_just_for_sqlalchemy(), nullable=False),
        sa.PrimaryKeyConstraint('device_id', name=op.f('device_pkey')),
        sa.ForeignKeyConstraint(['profile_name'], ['profile.profile_name'], name='device_profile',
                                match='SIMPLE', ondelete='NO ACTION', onupdate='NO ACTION')
    )
    op.create_table('log',
        sa.Column('process', sa.String(length=128), nullable=True),
        sa.Column('timestamp', sa.DateTime(timezone=True), nullable=False, server_default=func.now()),
        sa.Column('level', sa.Integer(), nullable=False),
        sa.Column('device_id', sa.Integer(), nullable=True),
        sa.Column('message', sa.Text(), nullable=False),
        sa.ForeignKeyConstraint(['device_id'], ['device.device_id'], name='log_process',
                                match='SIMPLE', ondelete='NO ACTION', onupdate='NO ACTION'),
        sa.CheckConstraint('level = ANY (ARRAY[0, 10, 15, 20, 30, 40, 50])', name=op.f('valid_log_levels'))
    )
    creating_functions()
    # ### end Alembic commands ###
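A matching downgrade would have to drop these tables in reverse dependency order, because log references device and device references profile; a minimal sketch (undoing whatever creating_functions() installs is project-specific and left out):

def downgrade():
    # Drop referencing tables before the tables they point to.
    op.drop_table('log')
    op.drop_table('device')
    op.drop_table('configuration')
    op.drop_table('profile')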
def map_column(self, mode: EditMode, request: Request, node: colander.SchemaNode, model: type, name: str, column: Column, column_type: TypeEngine) -> t.Tuple[colander.SchemaType, dict]:
    """Map non-relationship SQLAlchemy column to Colander SchemaNode.

    :return: Tuple(constructed colander.SchemaType, dict of additional colander.SchemaNode construction arguments)
    """
    logger.debug("Mapping field %s, mode %s, node %s, column %s, column type %s", name, mode, node, column, column_type)

    # Check for autogenerated columns (updated_at)
    if column.onupdate:
        if mode in (EditMode.edit, EditMode.add):
            return TypeOverridesHandling.drop, {}

    # Don't fill default values when added, as they are automatically populated
    if column.default:
        if mode == EditMode.add:
            return TypeOverridesHandling.drop, {}

    # Never add primary keys
    # NOTE: TODO: We need to preserve ids because of the nesting mechanism and the groupedit widget wants its id
    if column.primary_key:
        # TODO: Looks like column.autoincrement is set True by default, so we cannot use it here
        if mode in (EditMode.edit, EditMode.add):
            return TypeOverridesHandling.drop, {}

    if column.foreign_keys:
        # Handled by relationship mapper
        return TypeOverridesHandling.drop, {}
    elif isinstance(column_type, (PostgreSQLUUID, columns.UUID)):
        # UUIDs cannot be edited
        if mode in (EditMode.add, EditMode.edit):
            return TypeOverridesHandling.drop, {}
        # But let's show them
        return fields.UUID(), dict(missing=colander.drop, widget=FriendlyUUIDWidget(readonly=True))
    elif isinstance(column_type, Text):
        return colander.String(), dict(widget=deform.widget.TextAreaWidget())
    elif isinstance(column_type, JSONB):
        return JSONValue(), dict(widget=JSONWidget())
    elif isinstance(column_type, (JSONB, columns.JSONB)):
        # Can't edit JSON
        if mode in (EditMode.add, EditMode.edit):
            return TypeOverridesHandling.drop, {}
        return colander.String(), {}
    elif isinstance(column_type, LargeBinary):
        # Can't edit binary
        return TypeOverridesHandling.drop, {}
    elif isinstance(column_type, Geometry):
        # Can't edit geometry
        return TypeOverridesHandling.drop, {}
    elif isinstance(column_type, (INET, columns.INET)):
        return colander.String(), {}
    else:
        # Default mapping / unknown, let the parent handle
        return TypeOverridesHandling.unknown, {}
File: ddb09a7d6633_delete_unnecessary_db_tables.py
Project: fabric8-analytics-worker
Author: fabric8-analytics
def downgrade():
    """Downgrade the database to an older revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('stacks',
        sa.Column('id', sa.INTEGER(),
                  server_default=sa.text("nextval('stacks_id_seq'::regclass)"),
                  nullable=False),
        sa.Column('is_ref_stack', sa.BOOLEAN(), autoincrement=False, nullable=False),
        sa.Column('stack_json', postgresql.JSONB(astext_type=sa.Text()),
                  autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('id', name='stacks_pkey'),
        postgresql_ignore_search_path=False)
    op.create_table('similar_components',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('fromcomponent', sa.TEXT(), autoincrement=False, nullable=False),
        sa.Column('tocomponent', sa.TEXT(), autoincrement=False, nullable=False),
        sa.Column('similarity_distance', postgresql.DOUBLE_PRECISION(precision=53),
                  autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('id', name='similar_components_pkey'),
        sa.UniqueConstraint('fromcomponent', 'tocomponent', name='sim_comps'))
    op.create_table('similar_stacks',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('analysis', postgresql.JSONB(astext_type=sa.Text()),
                  autoincrement=False, nullable=True),
        sa.Column('similar_stack_id', sa.INTEGER(), autoincrement=False,
                  nullable=False),
        sa.Column('similarity_value', postgresql.DOUBLE_PRECISION(precision=53),
                  autoincrement=False, nullable=False),
        sa.Column('stack_id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(['similar_stack_id'], ['stacks.id'],
                                name='similar_stacks_similar_stack_id_fkey'),
        sa.ForeignKeyConstraint(['stack_id'], ['stacks.id'],
                                name='similar_stacks_stack_id_fkey'),
        sa.PrimaryKeyConstraint('id', name='similar_stacks_pkey'),
        sa.UniqueConstraint('stack_id', 'similar_stack_id', name='sim_unique'))
    op.create_table('esmarker',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('worker_result_id', sa.INTEGER(), autoincrement=False,
                  nullable=True),
        sa.ForeignKeyConstraint(['worker_result_id'], ['worker_results.id'],
                                name='esmarker_worker_result_id_fkey'),
        sa.PrimaryKeyConstraint('id', name='esmarker_pkey'))
    # ### end Alembic commands ###
async def apply(self, bind=None, timeout=DEFAULT):
    cls = type(self._instance)
    values = self._values.copy()

    # handle JSON columns
    json_updates = {}
    for prop, value in self._props.items():
        value = prop.save(self._instance, value)
        updates = json_updates.setdefault(prop.column_name, {})
        if self._literal:
            updates[prop.name] = value
        else:
            if isinstance(value, int):
                value = sa.cast(value, sa.BigInteger)
            elif not isinstance(value, ClauseElement):
                value = sa.cast(value, sa.Unicode)
            updates[sa.cast(prop.name, sa.Unicode)] = value
    for column_name, updates in json_updates.items():
        column = getattr(cls, column_name)
        if self._literal:
            values[column_name] = column.concat(updates)
        else:
            if isinstance(column.type, sa_pg.JSONB):
                func = sa.func.jsonb_build_object
            else:
                func = sa.func.json_build_object
            values[column_name] = column.concat(
                func(*itertools.chain(*updates.items())))

    opts = dict(return_model=False)
    if timeout is not DEFAULT:
        opts['timeout'] = timeout
    clause = self._clause.values(
        **values,
    ).returning(
        *[getattr(cls, key) for key in values],
    ).execution_options(**opts)
    row = await cls.__metadata__.first(clause, bind=bind)
    if not row:
        raise NoSuchRowError()
    self._instance.__values__.update(row)
    for prop in self._props:
        prop.reload(self._instance)
    return self
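The same partial-update trick, concatenating a jsonb_build_object(...) fragment onto the existing document with ||, can be written directly in SQLAlchemy core. A minimal sketch with illustrative table and column names (users, profile):

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg

metadata = sa.MetaData()
users = sa.Table(
    'users', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('profile', pg.JSONB()),
)

# UPDATE users SET profile = profile || jsonb_build_object('age', 30, 'nickname', 'bob') WHERE id = 1
patch = sa.func.jsonb_build_object(
    sa.cast('age', sa.Unicode), sa.cast(30, sa.BigInteger),
    sa.cast('nickname', sa.Unicode), sa.cast('bob', sa.Unicode),
)
stmt = users.update().where(users.c.id == 1).values(profile=users.c.profile.concat(patch))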
File: 20160428204857_pubmed_create_table.py
Project: collectors
Author: opentrials
def upgrade():
    op.create_table('pubmed',
        # Meta
        sa.Column('meta_id', sa.Text, unique=True),
        sa.Column('meta_source', sa.Text),
        sa.Column('meta_created', sa.DateTime(timezone=True)),
        sa.Column('meta_updated', sa.DateTime(timezone=True)),
        # Medline
        sa.Column('pmid', sa.Text, primary_key=True),
        sa.Column('date_created', sa.Date),
        sa.Column('date_completed', sa.Date),
        sa.Column('date_revised', sa.Date),
        sa.Column('country', sa.Text),
        sa.Column('medline_ta', sa.Text),
        sa.Column('nlm_unique_id', sa.Text),
        sa.Column('issn_linking', sa.Text),
        # Journal
        sa.Column('journal_issn', sa.Text),
        sa.Column('journal_title', sa.Text),
        sa.Column('journal_iso', sa.Text),
        # Article
        sa.Column('article_title', sa.Text),
        sa.Column('article_abstract', sa.Text),
        sa.Column('article_authors', ARRAY(sa.Text)),
        sa.Column('article_language', sa.Text),
        sa.Column('article_publication_type_list', ARRAY(sa.Text)),
        sa.Column('article_vernacular_title', sa.Text),
        sa.Column('article_date', sa.Date),
        # Pubmed
        sa.Column('publication_status', sa.Text),
        sa.Column('identifiers_list', JSONB()),
    )
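JSONB columns such as identifiers_list are often given a GIN index in a follow-up migration so containment lookups stay fast; a hedged sketch (the index name is an assumption, not part of the original migration):

def upgrade():
    # GIN index accelerates the @> and ? operators on the JSONB document.
    op.create_index(
        'ix_pubmed_identifiers_list',
        'pubmed',
        ['identifiers_list'],
        postgresql_using='gin',
    )


def downgrade():
    op.drop_index('ix_pubmed_identifiers_list', table_name='pubmed')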
def upgrade():
    op.create_table('artifact_set_members',
        sa.Column('set_id', sa.VARCHAR(length=40), nullable=False),
        sa.Column('artifact_id', sa.VARCHAR(length=40), nullable=False),
        sa.PrimaryKeyConstraint('set_id', 'artifact_id'))
    op.create_table('artifact_sets',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('set_id', sa.VARCHAR(length=40), nullable=True),
        sa.Column('name', sa.VARCHAR(length=1000), nullable=True),
        sa.Column('created_at', pg.TIMESTAMP(), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table('runs',
        sa.Column('id', sa.VARCHAR(length=40), nullable=False),
        sa.Column('hostname', sa.VARCHAR(length=256), nullable=True),
        sa.Column('info', pg.JSONB(), nullable=True),
        sa.Column('created_at', pg.TIMESTAMP(), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_table('artifacts',
        sa.Column('id', sa.VARCHAR(length=40), nullable=False),
        sa.Column('value_id', sa.VARCHAR(length=50), nullable=True),
        sa.Column('run_id', sa.VARCHAR(length=40), nullable=True),
        sa.Column('name', sa.VARCHAR(length=1000), nullable=True),
        sa.Column('version', sa.INTEGER(), nullable=True),
        sa.Column('fn_module', sa.VARCHAR(length=100), nullable=True),
        sa.Column('fn_name', sa.VARCHAR(length=100), nullable=True),
        sa.Column('composite', sa.BOOLEAN(), nullable=True),
        sa.Column('value_id_duration', sa.FLOAT(), nullable=True),
        sa.Column('compute_duration', sa.FLOAT(), nullable=True),
        sa.Column('hash_duration', sa.FLOAT(), nullable=True),
        sa.Column('computed_at', pg.TIMESTAMP(), nullable=True),
        sa.Column('added_at', pg.TIMESTAMP(), nullable=True),
        sa.Column('input_artifact_ids', pg.ARRAY(pg.VARCHAR(length=40)), nullable=True),
        sa.Column('inputs_json', pg.JSONB(), nullable=True),
        sa.Column('serializer', sa.VARCHAR(length=128), nullable=True),
        sa.Column('load_kwargs', pg.JSONB(), nullable=True),
        sa.Column('dump_kwargs', pg.JSONB(), nullable=True),
        sa.Column('custom_fields', pg.JSONB(), nullable=True),
        sa.ForeignKeyConstraint(['run_id'], ['runs.id'], ),
        sa.PrimaryKeyConstraint('id'))
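When inserting into tables like artifacts, SQLAlchemy passes plain Python dicts and lists straight through to the JSONB columns; a minimal sketch with a pared-down redeclaration of the table and made-up values:

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg

metadata = sa.MetaData()
artifacts = sa.Table(
    'artifacts', metadata,
    sa.Column('id', sa.VARCHAR(length=40), primary_key=True),
    sa.Column('inputs_json', pg.JSONB()),
    sa.Column('custom_fields', pg.JSONB()),
)

stmt = artifacts.insert().values(
    id='abc123',
    inputs_json={'varargs': [], 'kargs': {}},
    custom_fields={'owner': 'example'},
)
# engine.execute(stmt)  # dict/list values are adapted to JSONB automatically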