0

About stack:

  • PostgreSQL with schemas
  • Sqlalchemy == 2.0.15
  • Alembic == 1.11.1

I'm new to the Alembic tool, and during development I ran into the following problem: when I run `alembic revision --autogenerate -m ".."`, it generates the full basic DDL for all objects — as if they didn't already exist in the schema.

I'm using an extra schema for my project: `events`. Metadata creation:

self.sa_metadata: MetaData = MetaData(naming_convention=POSTGRES_INDEXES_NAMING_CONVENTION,
                                              schema=self.PG_SCHEMA)

Target metadata is also set:

target_metadata = APP_CTX.sa_metadata
print(target_metadata.schema)

The print shows the correct schema.

Searching the web for a solution, I found that updating Alembic's env.py to:

def do_run_migrations(connection: Connection) -> None:
    """Configure the Alembic migration context on the given connection and run migrations.

    NOTE(review): ``include_schemas=True`` makes autogenerate inspect ALL
    schemas visible on the connection, not just the target one — presumably
    this is why unrelated drop commands show up; confirm against the
    Alembic autogenerate docs.
    """
    # ADD include_schemas=True so autogenerate compares schema-qualified tables
    context.configure(connection=connection, target_metadata=target_metadata,
                      version_table_schema=target_metadata.schema,
                      include_schemas=True)

    with context.begin_transaction():
        # ADD EXTRA SCHEMA: point the session at the 'events' schema before migrating
        # context.execute('SET search_path TO events')
        context.execute("SET schema 'events'")
        context.run_migrations()


async def run_async_migrations() -> None:
    """Build an async Engine from the Alembic config and run the
    migrations over a single connection, disposing the engine afterwards.
    """

    engine = async_engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    async with engine.connect() as conn:
        # Bridge back into the sync migration runner on this connection.
        await conn.run_sync(do_run_migrations)

    await engine.dispose()

allows me to see the expected migration:

def upgrade() -> None:
    """Autogenerated Alembic upgrade: creates the new table/index/column in
    the 'events' schema. The drop commands below (commented out) were
    generated for objects outside the target metadata — the symptom being
    asked about.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('access_rights',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('read', sa.BOOLEAN(), nullable=False),
    sa.Column('write', sa.BOOLEAN(), nullable=False),
    sa.Column('change', sa.BOOLEAN(), nullable=False),
    sa.PrimaryKeyConstraint('id', name=op.f('pk__access_rights')),
    schema='events'
    )
    op.create_index(op.f('ix__access_rights__id'), 'access_rights', ['id'], unique=False, schema='events')
    # Unexpected drops below: autogenerate proposed removing tables it could
    # not match to the target metadata (no schema filter configured).
    # op.drop_table('alembic_version')
    # op.drop_index('ix__user_privileges__id', table_name='user_privileges')
    # op.drop_table('user_privileges')
    # op.drop_index('ix__event__id', table_name='event')
    # op.drop_table('event')
    # op.drop_index('ix__role__id', table_name='role')
    # op.drop_table('role')
    # op.drop_index('ix__event_access_rights__id', table_name='event_access_rights')
    # op.drop_table('event_access_rights')
    # op.drop_index('ix__user__email', table_name='user')
    # op.drop_table('user')
    op.add_column('event_access_rights', sa.Column('rights_id', sa.Integer(), nullable=True), schema='events')
    op.create_foreign_key(op.f('fk__event_access_rights__rights_id__event'), 'event_access_rights', 'event', ['rights_id'], ['id'], source_schema='events', referent_schema='events')
    # ### end Alembic commands ###

But it also adds drop commands I don't recognize (I've commented them out).

Any ideas how to make Alembic work with the selected schema (in my case, `events`)?

1 Answer

0

The solution is simple enough — I found it here:

def include_name(name, type_, parent_names):
    """Restrict autogenerate to objects in the 'events' schema.

    Non-schema objects (tables, indexes, ...) are always included; only
    schema names themselves are filtered.
    """
    if type_ != "schema":
        return True
    # Note: the default schema is reported as None, so it is excluded here.
    return name == "events"


def do_run_migrations(connection: Connection) -> None:
    """Configure the migration context to watch only the target schema,
    then run the migrations inside a transaction.
    """
    configure_opts = dict(
        connection=connection,
        target_metadata=target_metadata,
        version_table_schema=target_metadata.schema,
        include_schemas=True,
        include_name=include_name,
    )
    context.configure(**configure_opts)

    with context.begin_transaction():
        context.run_migrations()

Now when I run `alembic revision --autogenerate`, Alembic looks for changes only in the `events` schema.