Compare commits

..

9 Commits

45 changed files with 1049 additions and 1538 deletions

View File

@@ -40,6 +40,7 @@
"cgroupdriver",
"charliermarsh",
"Checkpointing",
"cloudflared",
"codellama",
"codezombiech",
"compactmode",
@@ -307,7 +308,6 @@
"usernamehw",
"userprefs",
"vaninventory",
"vdev",
"vfat",
"victron",
"virt",

View File

@@ -21,6 +21,7 @@
alembic
apprise
apscheduler
confluent-kafka
fastapi
fastapi-cli
httpx

View File

@@ -46,7 +46,12 @@ ALREADY_ATTACHED_QUERY = text("""
def upgrade() -> None:
"""Attach all weekly partition tables to the posts parent table."""
connection = op.get_bind()
already_attached = {row[0] for row in connection.execute(ALREADY_ATTACHED_QUERY, {"parent": f"{schema}.posts"})}
already_attached = {
row[0]
for row in connection.execute(
ALREADY_ATTACHED_QUERY, {"parent": f"{schema}.posts"}
)
}
for year in range(PARTITION_START_YEAR, PARTITION_END_YEAR + 1):
for week in range(1, iso_weeks_in_year(year) + 1):
@@ -69,4 +74,7 @@ def downgrade() -> None:
for year in range(PARTITION_START_YEAR, PARTITION_END_YEAR + 1):
for week in range(1, iso_weeks_in_year(year) + 1):
table_name = f"posts_{year}_{week:02d}"
op.execute(f"ALTER TABLE {schema}.posts DETACH PARTITION {schema}.{table_name}")
op.execute(
f"ALTER TABLE {schema}.posts "
f"DETACH PARTITION {schema}.{table_name}"
)
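
The loop bound relies on iso_weeks_in_year, which is defined elsewhere in this migration (outside the hunk). A minimal sketch of a standard implementation, using the fact that 28 December always falls in a year's final ISO week:

from datetime import date

def iso_weeks_in_year(year: int) -> int:
    """Return 52 or 53; 28 December always lies in the year's final ISO week."""
    return date(year, 12, 28).isocalendar()[1]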

View File

@@ -1,153 +0,0 @@
"""adding congress data.
Revision ID: 83bfc8af92d8
Revises: a1b2c3d4e5f6
Create Date: 2026-03-27 10:43:02.324510
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import sqlalchemy as sa
from alembic import op
from python.orm import DataScienceDevBase
if TYPE_CHECKING:
from collections.abc import Sequence
# revision identifiers, used by Alembic.
revision: str = "83bfc8af92d8"
down_revision: str | None = "a1b2c3d4e5f6"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
schema = DataScienceDevBase.schema_name
def upgrade() -> None:
"""Upgrade."""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"bill",
sa.Column("congress", sa.Integer(), nullable=False),
sa.Column("bill_type", sa.String(), nullable=False),
sa.Column("number", sa.Integer(), nullable=False),
sa.Column("title", sa.String(), nullable=True),
sa.Column("title_short", sa.String(), nullable=True),
sa.Column("official_title", sa.String(), nullable=True),
sa.Column("status", sa.String(), nullable=True),
sa.Column("status_at", sa.Date(), nullable=True),
sa.Column("sponsor_bioguide_id", sa.String(), nullable=True),
sa.Column("subjects_top_term", sa.String(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("updated", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.PrimaryKeyConstraint("id", name=op.f("pk_bill")),
sa.UniqueConstraint("congress", "bill_type", "number", name="uq_bill_congress_type_number"),
schema=schema,
)
op.create_index("ix_bill_congress", "bill", ["congress"], unique=False, schema=schema)
op.create_table(
"legislator",
sa.Column("bioguide_id", sa.Text(), nullable=False),
sa.Column("thomas_id", sa.String(), nullable=True),
sa.Column("lis_id", sa.String(), nullable=True),
sa.Column("govtrack_id", sa.Integer(), nullable=True),
sa.Column("opensecrets_id", sa.String(), nullable=True),
sa.Column("fec_ids", sa.String(), nullable=True),
sa.Column("first_name", sa.String(), nullable=False),
sa.Column("last_name", sa.String(), nullable=False),
sa.Column("official_full_name", sa.String(), nullable=True),
sa.Column("nickname", sa.String(), nullable=True),
sa.Column("birthday", sa.Date(), nullable=True),
sa.Column("gender", sa.String(), nullable=True),
sa.Column("current_party", sa.String(), nullable=True),
sa.Column("current_state", sa.String(), nullable=True),
sa.Column("current_district", sa.Integer(), nullable=True),
sa.Column("current_chamber", sa.String(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("updated", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.PrimaryKeyConstraint("id", name=op.f("pk_legislator")),
schema=schema,
)
op.create_index(op.f("ix_legislator_bioguide_id"), "legislator", ["bioguide_id"], unique=True, schema=schema)
op.create_table(
"bill_text",
sa.Column("bill_id", sa.Integer(), nullable=False),
sa.Column("version_code", sa.String(), nullable=False),
sa.Column("version_name", sa.String(), nullable=True),
sa.Column("text_content", sa.String(), nullable=True),
sa.Column("date", sa.Date(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("updated", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.ForeignKeyConstraint(
["bill_id"], [f"{schema}.bill.id"], name=op.f("fk_bill_text_bill_id_bill"), ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_bill_text")),
sa.UniqueConstraint("bill_id", "version_code", name="uq_bill_text_bill_id_version_code"),
schema=schema,
)
op.create_table(
"vote",
sa.Column("congress", sa.Integer(), nullable=False),
sa.Column("chamber", sa.String(), nullable=False),
sa.Column("session", sa.Integer(), nullable=False),
sa.Column("number", sa.Integer(), nullable=False),
sa.Column("vote_type", sa.String(), nullable=True),
sa.Column("question", sa.String(), nullable=True),
sa.Column("result", sa.String(), nullable=True),
sa.Column("result_text", sa.String(), nullable=True),
sa.Column("vote_date", sa.Date(), nullable=False),
sa.Column("yea_count", sa.Integer(), nullable=True),
sa.Column("nay_count", sa.Integer(), nullable=True),
sa.Column("not_voting_count", sa.Integer(), nullable=True),
sa.Column("present_count", sa.Integer(), nullable=True),
sa.Column("bill_id", sa.Integer(), nullable=True),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("updated", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.ForeignKeyConstraint(["bill_id"], [f"{schema}.bill.id"], name=op.f("fk_vote_bill_id_bill")),
sa.PrimaryKeyConstraint("id", name=op.f("pk_vote")),
sa.UniqueConstraint("congress", "chamber", "session", "number", name="uq_vote_congress_chamber_session_number"),
schema=schema,
)
op.create_index("ix_vote_congress_chamber", "vote", ["congress", "chamber"], unique=False, schema=schema)
op.create_index("ix_vote_date", "vote", ["vote_date"], unique=False, schema=schema)
op.create_table(
"vote_record",
sa.Column("vote_id", sa.Integer(), nullable=False),
sa.Column("legislator_id", sa.Integer(), nullable=False),
sa.Column("position", sa.String(), nullable=False),
sa.ForeignKeyConstraint(
["legislator_id"],
[f"{schema}.legislator.id"],
name=op.f("fk_vote_record_legislator_id_legislator"),
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["vote_id"], [f"{schema}.vote.id"], name=op.f("fk_vote_record_vote_id_vote"), ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("vote_id", "legislator_id", name=op.f("pk_vote_record")),
schema=schema,
)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("vote_record", schema=schema)
op.drop_index("ix_vote_date", table_name="vote", schema=schema)
op.drop_index("ix_vote_congress_chamber", table_name="vote", schema=schema)
op.drop_table("vote", schema=schema)
op.drop_table("bill_text", schema=schema)
op.drop_index(op.f("ix_legislator_bioguide_id"), table_name="legislator", schema=schema)
op.drop_table("legislator", schema=schema)
op.drop_index("ix_bill_congress", table_name="bill", schema=schema)
op.drop_table("bill", schema=schema)
# ### end Alembic commands ###

View File

@@ -1,58 +0,0 @@
"""adding LegislatorSocialMedia.
Revision ID: 5cd7eee3549d
Revises: 83bfc8af92d8
Create Date: 2026-03-29 11:53:44.224799
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import sqlalchemy as sa
from alembic import op
from python.orm import DataScienceDevBase
if TYPE_CHECKING:
from collections.abc import Sequence
# revision identifiers, used by Alembic.
revision: str = "5cd7eee3549d"
down_revision: str | None = "83bfc8af92d8"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
schema = DataScienceDevBase.schema_name
def upgrade() -> None:
"""Upgrade."""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"legislator_social_media",
sa.Column("legislator_id", sa.Integer(), nullable=False),
sa.Column("platform", sa.String(), nullable=False),
sa.Column("account_name", sa.String(), nullable=False),
sa.Column("url", sa.String(), nullable=True),
sa.Column("source", sa.String(), nullable=False),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("created", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.Column("updated", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
sa.ForeignKeyConstraint(
["legislator_id"],
[f"{schema}.legislator.id"],
name=op.f("fk_legislator_social_media_legislator_id_legislator"),
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_legislator_social_media")),
schema=schema,
)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("legislator_social_media", schema=schema)
# ### end Alembic commands ###

View File

@@ -3,6 +3,7 @@
from __future__ import annotations
import logging
import re
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal

View File

@@ -1,187 +0,0 @@
"""removed ds table from richie DB.
Revision ID: c8a794340928
Revises: 6b275323f435
Create Date: 2026-03-29 15:29:23.643146
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from python.orm import RichieBase
if TYPE_CHECKING:
from collections.abc import Sequence
# revision identifiers, used by Alembic.
revision: str = "c8a794340928"
down_revision: str | None = "6b275323f435"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
schema = RichieBase.schema_name
def upgrade() -> None:
"""Upgrade."""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("vote_record", schema=schema)
op.drop_index(op.f("ix_vote_congress_chamber"), table_name="vote", schema=schema)
op.drop_index(op.f("ix_vote_date"), table_name="vote", schema=schema)
op.drop_index(op.f("ix_legislator_bioguide_id"), table_name="legislator", schema=schema)
op.drop_table("legislator", schema=schema)
op.drop_table("vote", schema=schema)
op.drop_index(op.f("ix_bill_congress"), table_name="bill", schema=schema)
op.drop_table("bill", schema=schema)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade."""
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"vote",
sa.Column("congress", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("chamber", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("session", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("number", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("vote_type", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("question", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("result", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("result_text", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("vote_date", sa.DATE(), autoincrement=False, nullable=False),
sa.Column("yea_count", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("nay_count", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("not_voting_count", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("present_count", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("bill_id", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.ForeignKeyConstraint(["bill_id"], [f"{schema}.bill.id"], name=op.f("fk_vote_bill_id_bill")),
sa.PrimaryKeyConstraint("id", name=op.f("pk_vote")),
sa.UniqueConstraint(
"congress",
"chamber",
"session",
"number",
name=op.f("uq_vote_congress_chamber_session_number"),
postgresql_include=[],
postgresql_nulls_not_distinct=False,
),
schema=schema,
)
op.create_index(op.f("ix_vote_date"), "vote", ["vote_date"], unique=False, schema=schema)
op.create_index(op.f("ix_vote_congress_chamber"), "vote", ["congress", "chamber"], unique=False, schema=schema)
op.create_table(
"vote_record",
sa.Column("vote_id", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("legislator_id", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("position", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(
["legislator_id"],
[f"{schema}.legislator.id"],
name=op.f("fk_vote_record_legislator_id_legislator"),
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["vote_id"], [f"{schema}.vote.id"], name=op.f("fk_vote_record_vote_id_vote"), ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("vote_id", "legislator_id", name=op.f("pk_vote_record")),
schema=schema,
)
op.create_table(
"legislator",
sa.Column("bioguide_id", sa.TEXT(), autoincrement=False, nullable=False),
sa.Column("thomas_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("lis_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("govtrack_id", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("opensecrets_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("fec_ids", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("first_name", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("last_name", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("official_full_name", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("nickname", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("birthday", sa.DATE(), autoincrement=False, nullable=True),
sa.Column("gender", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("current_party", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("current_state", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("current_district", sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column("current_chamber", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_legislator")),
schema=schema,
)
op.create_index(op.f("ix_legislator_bioguide_id"), "legislator", ["bioguide_id"], unique=True, schema=schema)
op.create_table(
"bill",
sa.Column("congress", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("bill_type", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("number", sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column("title", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("title_short", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("official_title", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("status", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("status_at", sa.DATE(), autoincrement=False, nullable=True),
sa.Column("sponsor_bioguide_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("subjects_top_term", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.Column(
"updated",
postgresql.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
autoincrement=False,
nullable=False,
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_bill")),
sa.UniqueConstraint(
"congress",
"bill_type",
"number",
name=op.f("uq_bill_congress_type_number"),
postgresql_include=[],
postgresql_nulls_not_distinct=False,
),
schema=schema,
)
op.create_index(op.f("ix_bill_congress"), "bill", ["congress"], unique=False, schema=schema)
# ### end Alembic commands ###

View File

@@ -0,0 +1,104 @@
"""Utilities for converting Bluesky identifiers to numeric database IDs.
Handles DID-to-user_id hashing, TID-to-post_id decoding, and AT-URI parsing.
"""
from __future__ import annotations
import hashlib
TID_CHARSET = "234567abcdefghijklmnopqrstuvwxyz"
_TID_LENGTH = 13
_BIGINT_MASK = 0x7FFFFFFFFFFFFFFF
_AT_URI_SEGMENT_COUNT = 3
def did_to_user_id(did: str) -> int:
"""Convert a DID string to a deterministic 63-bit integer for user_id.
Uses SHA-256, truncated to 63 bits (positive signed BigInteger range).
Collision probability is negligible at Bluesky's scale (~tens of millions of users).
Args:
did: A Bluesky DID string, e.g. "did:plc:abc123".
Returns:
A positive 63-bit integer suitable for BigInteger storage.
"""
digest = hashlib.sha256(did.encode()).digest()
return int.from_bytes(digest[:8], "big") & _BIGINT_MASK
def tid_to_integer(tid: str) -> int:
"""Decode a Bluesky TID (base32-sortbase) into a 64-bit integer for post_id.
TIDs are 13-character, base32-sortbase encoded identifiers that encode a
microsecond timestamp plus a clock ID. They are globally unique by construction.
Args:
tid: A 13-character TID string, e.g. "3abc2defghijk".
Returns:
A positive integer suitable for BigInteger storage.
Raises:
ValueError: If the TID is malformed (wrong length or invalid characters).
"""
if len(tid) != _TID_LENGTH:
message = f"TID must be {_TID_LENGTH} characters, got {len(tid)}: {tid!r}"
raise ValueError(message)
result = 0
for char in tid:
index = TID_CHARSET.find(char)
if index == -1:
message = f"Invalid character {char!r} in TID {tid!r}"
raise ValueError(message)
result = result * 32 + index
return result
def parse_at_uri(uri: str) -> tuple[str, str, str]:
"""Parse an AT-URI into its components.
Args:
uri: An AT-URI string, e.g. "at://did:plc:abc123/app.bsky.feed.post/3abc2defghijk".
Returns:
A tuple of (did, collection, rkey).
Raises:
ValueError: If the URI doesn't have the expected format.
"""
stripped = uri.removeprefix("at://")
parts = stripped.split("/", maxsplit=2)
if len(parts) != _AT_URI_SEGMENT_COUNT:
message = f"Expected {_AT_URI_SEGMENT_COUNT} path segments in AT-URI, got {len(parts)}: {uri!r}"
raise ValueError(message)
return parts[0], parts[1], parts[2]
def post_id_from_uri(uri: str) -> int:
"""Extract and decode the post_id (TID) from an AT-URI.
Args:
uri: An AT-URI pointing to a post.
Returns:
The post_id as an integer.
"""
_did, _collection, rkey = parse_at_uri(uri)
return tid_to_integer(rkey)
def user_id_from_uri(uri: str) -> int:
"""Extract and hash the user_id (DID) from an AT-URI.
Args:
uri: An AT-URI pointing to a post.
Returns:
The user_id as an integer.
"""
did, _collection, _rkey = parse_at_uri(uri)
return did_to_user_id(did)
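
A usage sketch for these helpers (the DID and AT-URI below are illustrative; the decoded values follow directly from the definitions above):

from python.data_science.bluesky_ids import (
    did_to_user_id,
    parse_at_uri,
    post_id_from_uri,
    tid_to_integer,
    user_id_from_uri,
)

# '2' is index 0 in TID_CHARSET, so a TID of thirteen '2's decodes to 0.
assert tid_to_integer("2222222222222") == 0
assert tid_to_integer("2222222222223") == 1  # trailing '3' has index 1

uri = "at://did:plc:abc123/app.bsky.feed.post/2222222222223"
assert parse_at_uri(uri) == ("did:plc:abc123", "app.bsky.feed.post", "2222222222223")
assert post_id_from_uri(uri) == 1
assert user_id_from_uri(uri) == did_to_user_id("did:plc:abc123")  # deterministic 63-bit hash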

View File

@@ -0,0 +1,143 @@
"""Transform Bluesky Jetstream messages into rows matching the Posts table schema."""
from __future__ import annotations
import json
import logging
from datetime import datetime
from python.data_science.bluesky_ids import (
did_to_user_id,
post_id_from_uri,
tid_to_integer,
user_id_from_uri,
)
logger = logging.getLogger(__name__)
INSTANCE = "bsky"
POST_COLLECTION = "app.bsky.feed.post"
EMBED_RECORD_TYPE = "app.bsky.embed.record"
EMBED_RECORD_WITH_MEDIA_TYPE = "app.bsky.embed.recordWithMedia"
def transform_jetstream_post(message: dict) -> dict:
"""Transform a Jetstream commit message into a dict matching Posts table columns.
Expects a Jetstream message with kind=commit, operation=create,
collection=app.bsky.feed.post.
Args:
message: The full Jetstream JSON message.
Returns:
A dict with keys matching the Posts table columns.
"""
did = message["did"]
commit = message["commit"]
record = commit["record"]
row: dict = {
"post_id": tid_to_integer(commit["rkey"]),
"user_id": did_to_user_id(did),
"instance": INSTANCE,
"date": datetime.fromisoformat(record["createdAt"]),
"text": record.get("text", ""),
"langs": _extract_langs(record),
"like_count": 0,
"reply_count": 0,
"repost_count": 0,
"reply_to": None,
"replied_author": None,
"thread_root": None,
"thread_root_author": None,
"repost_from": None,
"reposted_author": None,
"quotes": None,
"quoted_author": None,
"labels": _extract_labels(record),
"sent_label": None,
"sent_score": None,
}
_extract_reply_refs(record, row)
_extract_quote_refs(record, row)
return row
def is_post_create(message: dict) -> bool:
"""Check if a Jetstream message is a post creation event.
Args:
message: The full Jetstream JSON message.
Returns:
True if this is a create commit for app.bsky.feed.post.
"""
if message.get("kind") != "commit":
return False
commit = message.get("commit", {})
return commit.get("operation") == "create" and commit.get("collection") == POST_COLLECTION
def _extract_langs(record: dict) -> str | None:
"""Extract langs array as a JSON string, or None if absent."""
langs = record.get("langs")
if langs is None:
return None
return json.dumps(langs)
def _extract_labels(record: dict) -> str | None:
"""Extract self-labels as a JSON string, or None if absent."""
labels_obj = record.get("labels")
if labels_obj is None:
return None
values = labels_obj.get("values", [])
if not values:
return None
label_strings = [label.get("val", "") for label in values]
return json.dumps(label_strings)
def _extract_reply_refs(record: dict, row: dict) -> None:
"""Populate reply_to, replied_author, thread_root, thread_root_author from record.reply."""
reply = record.get("reply")
if reply is None:
return
parent = reply.get("parent", {})
parent_uri = parent.get("uri")
if parent_uri:
row["reply_to"] = post_id_from_uri(parent_uri)
row["replied_author"] = user_id_from_uri(parent_uri)
root = reply.get("root", {})
root_uri = root.get("uri")
if root_uri:
row["thread_root"] = post_id_from_uri(root_uri)
row["thread_root_author"] = user_id_from_uri(root_uri)
def _extract_quote_refs(record: dict, row: dict) -> None:
"""Populate quotes and quoted_author from embed record references."""
embed = record.get("embed")
if embed is None:
return
embed_type = embed.get("$type", "")
if embed_type == EMBED_RECORD_TYPE:
_set_quote_from_record(embed.get("record", {}), row)
elif embed_type == EMBED_RECORD_WITH_MEDIA_TYPE:
inner_record = embed.get("record", {}).get("record", {})
_set_quote_from_record(inner_record, row)
def _set_quote_from_record(record_ref: dict, row: dict) -> None:
"""Set quotes and quoted_author from a record reference object."""
uri = record_ref.get("uri")
if uri and POST_COLLECTION in uri:
row["quotes"] = post_id_from_uri(uri)
row["quoted_author"] = user_id_from_uri(uri)

View File

@@ -0,0 +1,203 @@
"""Kafka consumer that ingests Bluesky posts into the partitioned Posts table.
Consumes Jetstream messages from Kafka, transforms them into Posts rows,
and batch-inserts them into PostgreSQL with manual offset commits.
Usage:
firehose-consumer
firehose-consumer --kafka-servers kafka:9092 --batch-size 500
"""
from __future__ import annotations
import json
import logging
import signal
from os import getenv
from threading import Event
from typing import Annotated
import typer
from confluent_kafka import Consumer, KafkaError, KafkaException
from sqlalchemy.orm import Session
from python.data_science.bluesky_transform import is_post_create, transform_jetstream_post
from python.data_science.ingest_posts import ingest_batch
from python.orm.common import get_postgres_engine
from python.orm.data_science_dev.posts.failed_ingestion import FailedIngestion
logger = logging.getLogger(__name__)
DEFAULT_TOPIC = "bluesky.firehose.posts"
DEFAULT_KAFKA_SERVERS = "localhost:9092"
DEFAULT_GROUP_ID = "bluesky-posts-ingestor"
DEFAULT_BATCH_SIZE = 500
POLL_TIMEOUT_SECONDS = 5.0
shutdown_event = Event()
app = typer.Typer(help="Consume Bluesky posts from Kafka and ingest into PostgreSQL.")
@app.command()
def main(
kafka_servers: Annotated[str, typer.Option(help="Kafka bootstrap servers")] = "",
topic: Annotated[str, typer.Option(help="Kafka topic to consume from")] = "",
group_id: Annotated[str, typer.Option(help="Kafka consumer group ID")] = "",
batch_size: Annotated[int, typer.Option(help="Messages per DB insert batch")] = DEFAULT_BATCH_SIZE,
) -> None:
"""Consume Bluesky posts from Kafka and ingest into the partitioned posts table."""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%H:%M:%S",
)
servers = kafka_servers or getenv("KAFKA_BOOTSTRAP_SERVERS", DEFAULT_KAFKA_SERVERS)
topic_name = topic or getenv("BLUESKY_FIREHOSE_TOPIC", DEFAULT_TOPIC)
group = group_id or getenv("KAFKA_GROUP_ID", DEFAULT_GROUP_ID)
signal.signal(signal.SIGTERM, _handle_shutdown)
signal.signal(signal.SIGINT, _handle_shutdown)
consumer = _create_consumer(servers, group)
consumer.subscribe([topic_name])
engine = get_postgres_engine(name="DATA_SCIENCE_DEV")
total_inserted = 0
logger.info("Starting firehose consumer: topic=%s group=%s batch_size=%d", topic_name, group, batch_size)
try:
with Session(engine) as session:
while not shutdown_event.is_set():
inserted = _consume_batch(consumer, session, batch_size)
total_inserted += inserted
if inserted > 0:
logger.info("Batch inserted %d rows (total: %d)", inserted, total_inserted)
except KafkaException:
logger.exception("Fatal Kafka error")
finally:
logger.info("Closing consumer (total inserted: %d)", total_inserted)
consumer.close()
def _consume_batch(consumer: Consumer, session: Session, batch_size: int) -> int:
"""Poll a batch of messages, transform, and insert into the database.
Args:
consumer: The Kafka consumer instance.
session: SQLAlchemy database session.
batch_size: Maximum number of messages to consume per batch.
Returns:
Number of rows successfully inserted.
"""
messages = consumer.consume(num_messages=batch_size, timeout=POLL_TIMEOUT_SECONDS)
if not messages:
return 0
rows: list[dict] = []
for message in messages:
error = message.error()
if error is not None:
if error.code() == KafkaError._PARTITION_EOF: # noqa: SLF001 — confluent-kafka exposes this as a pseudo-private constant; no public alternative exists
continue
logger.error("Consumer error: %s", error)
continue
row = _safe_transform(message.value(), session)
if row is not None:
rows.append(row)
if not rows:
consumer.commit(asynchronous=False)
return 0
inserted = ingest_batch(session, rows)
consumer.commit(asynchronous=False)
return inserted
def _safe_transform(raw_value: bytes | None, session: Session) -> dict | None:
"""Transform a Kafka message value into a Posts row, logging failures.
Args:
raw_value: Raw message bytes from Kafka.
session: SQLAlchemy session for logging failures.
Returns:
A transformed row dict, or None if transformation failed.
"""
if raw_value is None:
return None
try:
message = json.loads(raw_value)
except (json.JSONDecodeError, UnicodeDecodeError):
logger.exception("Failed to decode Kafka message")
_log_failed_ingestion(session, raw_value, "JSON decode error")
return None
if not is_post_create(message):
return None
try:
return transform_jetstream_post(message)
except (KeyError, ValueError, TypeError):
logger.exception("Failed to transform Jetstream message")
_log_failed_ingestion(session, raw_value, "Transform error")
return None
def _log_failed_ingestion(session: Session, raw_value: bytes, error: str) -> None:
"""Log a failed ingestion to the FailedIngestion table.
Args:
session: SQLAlchemy session.
raw_value: The raw message bytes.
error: Description of the error.
"""
try:
session.add(
FailedIngestion(
raw_line=raw_value.decode(errors="replace")[:10000],
error=error,
)
)
session.commit()
except Exception:
session.rollback()
logger.exception("Failed to log ingestion failure")
def _create_consumer(servers: str, group: str) -> Consumer:
"""Create a configured Kafka consumer.
Args:
servers: Kafka bootstrap servers string.
group: Consumer group ID.
Returns:
A configured confluent_kafka.Consumer.
"""
config = {
"bootstrap.servers": servers,
"group.id": group,
"auto.offset.reset": "earliest",
"enable.auto.commit": False,
"max.poll.interval.ms": 300000,
"fetch.min.bytes": 1024,
"session.timeout.ms": 30000,
}
return Consumer(config)
def _handle_shutdown(_signum: int, _frame: object) -> None:
"""Signal handler to trigger graceful shutdown."""
logger.info("Shutdown signal received")
shutdown_event.set()
if __name__ == "__main__":
app()
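
Because offsets are committed only after ingest_batch returns, a crash between the insert and the commit replays the batch on restart: delivery is at-least-once, and ingest_batch must tolerate duplicate rows. ingest_batch itself is not part of this diff; a minimal sketch of one duplicate-tolerant approach, assuming a hypothetical Posts model at python.orm.data_science_dev.posts.posts and a (post_id, date) conflict target:

from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session

from python.orm.data_science_dev.posts.posts import Posts  # hypothetical module path

def ingest_batch_sketch(session: Session, rows: list[dict]) -> int:
    """Insert rows, silently skipping any that already exist (replay-safe)."""
    if not rows:
        return 0
    statement = insert(Posts).values(rows).on_conflict_do_nothing(
        index_elements=["post_id", "date"],  # assumed key of the partitioned table
    )
    result = session.execute(statement)
    session.commit()
    return result.rowcount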

View File

@@ -0,0 +1,230 @@
"""Bluesky Jetstream firehose to Kafka producer.
Connects to the Bluesky Jetstream WebSocket API with zstd compression,
filters for post creation events, and produces them to a Kafka topic.
Usage:
firehose-producer
firehose-producer --kafka-servers kafka:9092 --topic bluesky.firehose.posts
"""
from __future__ import annotations
import json
import logging
import signal
from os import getenv
from threading import Event
from typing import Annotated
import typer
from compression import zstd
from confluent_kafka import KafkaError, KafkaException, Producer
from websockets.exceptions import ConnectionClosed
from websockets.sync.client import connect
logger = logging.getLogger(__name__)
JETSTREAM_URL = "wss://jetstream2.us-east.bsky.network/subscribe"
DEFAULT_TOPIC = "bluesky.firehose.posts"
DEFAULT_KAFKA_SERVERS = "localhost:9092"
POLL_INTERVAL = 100
POST_COLLECTION = "app.bsky.feed.post"
shutdown_event = Event()
app = typer.Typer(help="Stream Bluesky firehose posts into Kafka.")
@app.command()
def main(
kafka_servers: Annotated[str, typer.Option(help="Kafka bootstrap servers")] = "",
topic: Annotated[str, typer.Option(help="Kafka topic to produce to")] = "",
collections: Annotated[str, typer.Option(help="Comma-separated collections to subscribe to")] = POST_COLLECTION,
) -> None:
"""Connect to Bluesky Jetstream and produce post events to Kafka."""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%H:%M:%S",
)
servers = kafka_servers or getenv("KAFKA_BOOTSTRAP_SERVERS", DEFAULT_KAFKA_SERVERS)
topic_name = topic or getenv("BLUESKY_FIREHOSE_TOPIC", DEFAULT_TOPIC)
signal.signal(signal.SIGTERM, _handle_shutdown)
signal.signal(signal.SIGINT, _handle_shutdown)
producer = _create_producer(servers)
cursor: int | None = None
logger.info("Starting firehose producer → %s on %s", topic_name, servers)
while not shutdown_event.is_set():
try:
cursor = _stream_loop(producer, topic_name, collections, cursor)
except (ConnectionClosed, OSError):
logger.exception("WebSocket disconnected, reconnecting")
except KafkaException:
logger.exception("Kafka error, reconnecting")
if not shutdown_event.is_set():
logger.info("Reconnecting in 5 seconds (cursor=%s)", cursor)
shutdown_event.wait(timeout=5)
logger.info("Shutting down, flushing producer")
producer.flush(timeout=30)
logger.info("Producer shutdown complete")
def _stream_loop(
producer: Producer,
topic: str,
collections: str,
cursor: int | None,
) -> int | None:
"""Connect to Jetstream and stream messages to Kafka until disconnected.
Args:
producer: The Kafka producer instance.
topic: Kafka topic name.
collections: Comma-separated AT Protocol collections to subscribe to.
cursor: Optional microsecond timestamp to resume from.
Returns:
The last processed time_us cursor value.
"""
url = _build_jetstream_url(collections, cursor)
logger.info("Connecting to %s", url)
message_count = 0
last_cursor = cursor
with connect(url, additional_headers={"Accept-Encoding": "zstd"}) as websocket:
logger.info("Connected to Jetstream")
while not shutdown_event.is_set():
try:
raw_frame = websocket.recv(timeout=10)
except TimeoutError:
producer.poll(0)
continue
text = _decode_frame(raw_frame)
message = json.loads(text)
time_us = message.get("time_us")
if time_us is not None:
last_cursor = time_us
if not _is_post_create(message):
continue
did = message.get("did", "")
try:
producer.produce(
topic,
key=did.encode(),
value=text.encode(),
callback=_delivery_callback,
)
except BufferError:
logger.warning("Producer buffer full, flushing")
producer.flush(timeout=10)
producer.produce(
topic,
key=did.encode(),
value=text.encode(),
callback=_delivery_callback,
)
message_count += 1
if message_count % POLL_INTERVAL == 0:
producer.poll(0)
if message_count % 10000 == 0:
logger.info("Produced %d messages (cursor=%s)", message_count, last_cursor)
return last_cursor
def _build_jetstream_url(collections: str, cursor: int | None) -> str:
"""Build the Jetstream WebSocket URL with query parameters.
Args:
collections: Comma-separated collection names.
cursor: Optional microsecond timestamp for resumption.
Returns:
The full WebSocket URL.
"""
params = ["compress=true"]
for raw_collection in collections.split(","):
cleaned = raw_collection.strip()
if cleaned:
params.append(f"wantedCollections={cleaned}")
if cursor is not None:
params.append(f"cursor={cursor}")
return f"{JETSTREAM_URL}?{'&'.join(params)}"
def _decode_frame(frame: str | bytes) -> str:
"""Decode a WebSocket frame, decompressing zstd if binary.
Jetstream with compress=true sends zstd-compressed binary frames.
Args:
frame: Raw WebSocket frame data.
Returns:
The decoded JSON string.
"""
if isinstance(frame, bytes):
return zstd.decompress(frame).decode()
return frame
def _is_post_create(message: dict) -> bool:
"""Check if a Jetstream message is a post creation commit."""
if message.get("kind") != "commit":
return False
commit = message.get("commit", {})
return commit.get("operation") == "create" and commit.get("collection") == POST_COLLECTION
def _create_producer(servers: str) -> Producer:
"""Create a configured Kafka producer.
Args:
servers: Kafka bootstrap servers string.
Returns:
A configured confluent_kafka.Producer.
"""
config = {
"bootstrap.servers": servers,
"linger.ms": 50,
"batch.size": 65536,
"compression.type": "zstd",
"acks": "all",
"retries": 5,
"retry.backoff.ms": 500,
}
return Producer(config)
def _delivery_callback(error: KafkaError | None, _message: object) -> None:
"""Log delivery failures from the Kafka producer."""
if error is not None:
logger.error("Kafka delivery failed: %s", error)
def _handle_shutdown(_signum: int, _frame: object) -> None:
"""Signal handler to trigger graceful shutdown."""
logger.info("Shutdown signal received")
shutdown_event.set()
if __name__ == "__main__":
app()
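
The subscription URL built by _build_jetstream_url is deterministic; a sketch of what the defaults produce:

url = _build_jetstream_url("app.bsky.feed.post", cursor=None)
assert url == (
    "wss://jetstream2.us-east.bsky.network/subscribe"
    "?compress=true&wantedCollections=app.bsky.feed.post"
)
# Passing cursor=1700000000000000 appends "&cursor=1700000000000000" for resumption.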

View File

@@ -1,613 +0,0 @@
"""Ingestion pipeline for loading congress data from unitedstates/congress JSON files.
Loads legislators, bills, votes, vote records, and bill text into the data_science_dev database.
Expects the parent directory to contain congress-tracker/ and congress-legislators/ as siblings.
Usage:
ingest-congress /path/to/parent/
ingest-congress /path/to/parent/ --congress 118
ingest-congress /path/to/parent/ --congress 118 --only bills
"""
from __future__ import annotations
import logging
from pathlib import Path # noqa: TC003 needed at runtime for typer CLI argument
from typing import TYPE_CHECKING, Annotated
import orjson
import typer
import yaml
from sqlalchemy import select
from sqlalchemy.orm import Session
from python.common import configure_logger
from python.orm.common import get_postgres_engine
from python.orm.data_science_dev.congress import Bill, BillText, Legislator, LegislatorSocialMedia, Vote, VoteRecord
if TYPE_CHECKING:
from collections.abc import Iterator
from sqlalchemy.engine import Engine
logger = logging.getLogger(__name__)
BATCH_SIZE = 10_000
app = typer.Typer(help="Ingest unitedstates/congress data into data_science_dev.")
@app.command()
def main(
parent_dir: Annotated[
Path,
typer.Argument(help="Parent directory containing congress-tracker/ and congress-legislators/"),
],
congress: Annotated[int | None, typer.Option(help="Only ingest a specific congress number")] = None,
only: Annotated[
str | None,
typer.Option(help="Only run a specific step: legislators, social-media, bills, votes, bill-text"),
] = None,
) -> None:
"""Ingest congress data from unitedstates/congress JSON files."""
configure_logger(level="INFO")
data_dir = parent_dir / "congress-tracker/congress/data/"
legislators_dir = parent_dir / "congress-legislators"
if not data_dir.is_dir():
typer.echo(f"Expected congress-tracker/ directory: {data_dir}", err=True)
raise typer.Exit(code=1)
if not legislators_dir.is_dir():
typer.echo(f"Expected congress-legislators/ directory: {legislators_dir}", err=True)
raise typer.Exit(code=1)
engine = get_postgres_engine(name="DATA_SCIENCE_DEV")
congress_dirs = _resolve_congress_dirs(data_dir, congress)
if not congress_dirs:
typer.echo("No congress directories found.", err=True)
raise typer.Exit(code=1)
logger.info("Found %d congress directories to process", len(congress_dirs))
steps: dict[str, tuple] = {
"legislators": (ingest_legislators, (engine, legislators_dir)),
"legislators-social-media": (ingest_social_media, (engine, legislators_dir)),
"bills": (ingest_bills, (engine, congress_dirs)),
"votes": (ingest_votes, (engine, congress_dirs)),
"bill-text": (ingest_bill_text, (engine, congress_dirs)),
}
if only:
if only not in steps:
typer.echo(f"Unknown step: {only}. Choose from: {', '.join(steps)}", err=True)
raise typer.Exit(code=1)
steps = {only: steps[only]}
for step_name, (step_func, step_args) in steps.items():
logger.info("=== Starting step: %s ===", step_name)
step_func(*step_args)
logger.info("=== Finished step: %s ===", step_name)
logger.info("ingest-congress done")
def _resolve_congress_dirs(data_dir: Path, congress: int | None) -> list[Path]:
"""Find congress number directories under data_dir."""
if congress is not None:
target = data_dir / str(congress)
return [target] if target.is_dir() else []
return sorted(path for path in data_dir.iterdir() if path.is_dir() and path.name.isdigit())
def _flush_batch(session: Session, batch: list[object], label: str) -> int:
"""Add a batch of ORM objects to the session and commit. Returns count added."""
if not batch:
return 0
session.add_all(batch)
session.commit()
count = len(batch)
logger.info("Committed %d %s", count, label)
batch.clear()
return count
# ---------------------------------------------------------------------------
# Legislators — loaded from congress-legislators YAML files
# ---------------------------------------------------------------------------
def ingest_legislators(engine: Engine, legislators_dir: Path) -> None:
"""Load legislators from congress-legislators YAML files."""
legislators_data = _load_legislators_yaml(legislators_dir)
logger.info("Loaded %d legislators from YAML files", len(legislators_data))
with Session(engine) as session:
existing_legislators = {
legislator.bioguide_id: legislator for legislator in session.scalars(select(Legislator)).all()
}
logger.info("Found %d existing legislators in DB", len(existing_legislators))
total_inserted = 0
total_updated = 0
for entry in legislators_data:
bioguide_id = entry.get("id", {}).get("bioguide")
if not bioguide_id:
continue
fields = _parse_legislator(entry)
if existing := existing_legislators.get(bioguide_id):
changed = False
for field, value in fields.items():
if value is not None and getattr(existing, field) != value:
setattr(existing, field, value)
changed = True
if changed:
total_updated += 1
else:
session.add(Legislator(bioguide_id=bioguide_id, **fields))
total_inserted += 1
session.commit()
logger.info("Inserted %d new legislators, updated %d existing", total_inserted, total_updated)
def _load_legislators_yaml(legislators_dir: Path) -> list[dict]:
"""Load and combine legislators-current.yaml and legislators-historical.yaml."""
legislators: list[dict] = []
for filename in ("legislators-current.yaml", "legislators-historical.yaml"):
path = legislators_dir / filename
if not path.exists():
logger.warning("Legislators file not found: %s", path)
continue
with path.open() as file:
data = yaml.safe_load(file)
if isinstance(data, list):
legislators.extend(data)
return legislators
def _parse_legislator(entry: dict) -> dict:
"""Extract Legislator fields from a congress-legislators YAML entry."""
ids = entry.get("id", {})
name = entry.get("name", {})
bio = entry.get("bio", {})
terms = entry.get("terms", [])
latest_term = terms[-1] if terms else {}
fec_ids = ids.get("fec")
fec_ids_joined = ",".join(fec_ids) if isinstance(fec_ids, list) else fec_ids
chamber = latest_term.get("type")
chamber_normalized = {"rep": "House", "sen": "Senate"}.get(chamber, chamber)
return {
"thomas_id": ids.get("thomas"),
"lis_id": ids.get("lis"),
"govtrack_id": ids.get("govtrack"),
"opensecrets_id": ids.get("opensecrets"),
"fec_ids": fec_ids_joined,
"first_name": name.get("first"),
"last_name": name.get("last"),
"official_full_name": name.get("official_full"),
"nickname": name.get("nickname"),
"birthday": bio.get("birthday"),
"gender": bio.get("gender"),
"current_party": latest_term.get("party"),
"current_state": latest_term.get("state"),
"current_district": latest_term.get("district"),
"current_chamber": chamber_normalized,
}
# ---------------------------------------------------------------------------
# Social Media — loaded from legislators-social-media.yaml
# ---------------------------------------------------------------------------
SOCIAL_MEDIA_PLATFORMS = {
"twitter": "https://twitter.com/{account}",
"facebook": "https://facebook.com/{account}",
"youtube": "https://youtube.com/{account}",
"instagram": "https://instagram.com/{account}",
"mastodon": None,
}
def ingest_social_media(engine: Engine, legislators_dir: Path) -> None:
"""Load social media accounts from legislators-social-media.yaml."""
social_media_path = legislators_dir / "legislators-social-media.yaml"
if not social_media_path.exists():
logger.warning("Social media file not found: %s", social_media_path)
return
with social_media_path.open() as file:
social_media_data = yaml.safe_load(file)
if not isinstance(social_media_data, list):
logger.warning("Unexpected format in %s", social_media_path)
return
logger.info("Loaded %d entries from legislators-social-media.yaml", len(social_media_data))
with Session(engine) as session:
legislator_map = _build_legislator_map(session)
existing_accounts = {
(account.legislator_id, account.platform)
for account in session.scalars(select(LegislatorSocialMedia)).all()
}
logger.info("Found %d existing social media accounts in DB", len(existing_accounts))
total_inserted = 0
total_skipped = 0
for entry in social_media_data:
bioguide_id = entry.get("id", {}).get("bioguide")
if not bioguide_id:
continue
legislator_id = legislator_map.get(bioguide_id)
if legislator_id is None:
continue
social = entry.get("social", {})
for platform, url_template in SOCIAL_MEDIA_PLATFORMS.items():
account_name = social.get(platform)
if not account_name:
continue
url = url_template.format(account=account_name) if url_template else None
if (legislator_id, platform) in existing_accounts:
    total_skipped += 1
else:
session.add(
LegislatorSocialMedia(
legislator_id=legislator_id,
platform=platform,
account_name=str(account_name),
url=url,
source="https://github.com/unitedstates/congress-legislators",
)
)
existing_accounts.add((legislator_id, platform))
total_inserted += 1
session.commit()
logger.info("Inserted %d new social media accounts, updated %d existing", total_inserted, total_updated)
def _iter_voters(position_group: object) -> Iterator[dict]:
"""Yield voter dicts from a vote position group (handles list, single dict, or string)."""
if isinstance(position_group, dict):
yield position_group
elif isinstance(position_group, list):
for voter in position_group:
if isinstance(voter, dict):
yield voter
# ---------------------------------------------------------------------------
# Bills
# ---------------------------------------------------------------------------
def ingest_bills(engine: Engine, congress_dirs: list[Path]) -> None:
"""Load bill data.json files."""
with Session(engine) as session:
existing_bills = {(bill.congress, bill.bill_type, bill.number) for bill in session.scalars(select(Bill)).all()}
logger.info("Found %d existing bills in DB", len(existing_bills))
total_inserted = 0
batch: list[Bill] = []
for congress_dir in congress_dirs:
bills_dir = congress_dir / "bills"
if not bills_dir.is_dir():
continue
logger.info("Scanning bills from %s", congress_dir.name)
for bill_file in bills_dir.rglob("data.json"):
data = _read_json(bill_file)
if data is None:
continue
bill = _parse_bill(data, existing_bills)
if bill is not None:
batch.append(bill)
if len(batch) >= BATCH_SIZE:
total_inserted += _flush_batch(session, batch, "bills")
total_inserted += _flush_batch(session, batch, "bills")
logger.info("Inserted %d new bills total", total_inserted)
def _parse_bill(data: dict, existing_bills: set[tuple[int, str, int]]) -> Bill | None:
"""Parse a bill data.json dict into a Bill ORM object, skipping existing."""
raw_congress = data.get("congress")
bill_type = data.get("bill_type")
raw_number = data.get("number")
if raw_congress is None or bill_type is None or raw_number is None:
return None
congress = int(raw_congress)
number = int(raw_number)
if (congress, bill_type, number) in existing_bills:
return None
sponsor_bioguide = None
sponsor = data.get("sponsor")
if sponsor:
sponsor_bioguide = sponsor.get("bioguide_id")
return Bill(
congress=congress,
bill_type=bill_type,
number=number,
title=data.get("short_title") or data.get("official_title"),
title_short=data.get("short_title"),
official_title=data.get("official_title"),
status=data.get("status"),
status_at=data.get("status_at"),
sponsor_bioguide_id=sponsor_bioguide,
subjects_top_term=data.get("subjects_top_term"),
)
# ---------------------------------------------------------------------------
# Votes (and vote records)
# ---------------------------------------------------------------------------
def ingest_votes(engine: Engine, congress_dirs: list[Path]) -> None:
"""Load vote data.json files with their vote records."""
with Session(engine) as session:
legislator_map = _build_legislator_map(session)
logger.info("Loaded %d legislators into lookup map", len(legislator_map))
bill_map = _build_bill_map(session)
logger.info("Loaded %d bills into lookup map", len(bill_map))
existing_votes = {
(vote.congress, vote.chamber, vote.session, vote.number) for vote in session.scalars(select(Vote)).all()
}
logger.info("Found %d existing votes in DB", len(existing_votes))
total_inserted = 0
batch: list[Vote] = []
for congress_dir in congress_dirs:
votes_dir = congress_dir / "votes"
if not votes_dir.is_dir():
continue
logger.info("Scanning votes from %s", congress_dir.name)
for vote_file in votes_dir.rglob("data.json"):
data = _read_json(vote_file)
if data is None:
continue
vote = _parse_vote(data, legislator_map, bill_map, existing_votes)
if vote is not None:
batch.append(vote)
if len(batch) >= BATCH_SIZE:
total_inserted += _flush_batch(session, batch, "votes")
total_inserted += _flush_batch(session, batch, "votes")
logger.info("Inserted %d new votes total", total_inserted)
def _build_legislator_map(session: Session) -> dict[str, int]:
"""Build a mapping of bioguide_id -> legislator.id."""
return {legislator.bioguide_id: legislator.id for legislator in session.scalars(select(Legislator)).all()}
def _build_bill_map(session: Session) -> dict[tuple[int, str, int], int]:
"""Build a mapping of (congress, bill_type, number) -> bill.id."""
return {(bill.congress, bill.bill_type, bill.number): bill.id for bill in session.scalars(select(Bill)).all()}
def _parse_vote(
data: dict,
legislator_map: dict[str, int],
bill_map: dict[tuple[int, str, int], int],
existing_votes: set[tuple[int, str, int, int]],
) -> Vote | None:
"""Parse a vote data.json dict into a Vote ORM object with records."""
raw_congress = data.get("congress")
chamber = data.get("chamber")
raw_number = data.get("number")
vote_date = data.get("date")
if raw_congress is None or chamber is None or raw_number is None or vote_date is None:
return None
raw_session = data.get("session")
if raw_session is None:
return None
congress = int(raw_congress)
number = int(raw_number)
session_number = int(raw_session)
# Normalize chamber from "h"/"s" to "House"/"Senate"
chamber_normalized = {"h": "House", "s": "Senate"}.get(chamber, chamber)
if (congress, chamber_normalized, session_number, number) in existing_votes:
return None
# Resolve linked bill
bill_id = None
bill_ref = data.get("bill")
if bill_ref:
bill_key = (
int(bill_ref.get("congress", congress)),
bill_ref.get("type"),
int(bill_ref.get("number", 0)),
)
bill_id = bill_map.get(bill_key)
raw_votes = data.get("votes", {})
vote_counts = _count_votes(raw_votes)
vote_records = _build_vote_records(raw_votes, legislator_map)
return Vote(
congress=congress,
chamber=chamber_normalized,
session=session_number,
number=number,
vote_type=data.get("type"),
question=data.get("question"),
result=data.get("result"),
result_text=data.get("result_text"),
vote_date=vote_date[:10] if isinstance(vote_date, str) else vote_date,
bill_id=bill_id,
vote_records=vote_records,
**vote_counts,
)
def _count_votes(raw_votes: dict) -> dict[str, int]:
"""Count voters per position category, correctly handling dict and list formats."""
yea_count = 0
nay_count = 0
not_voting_count = 0
present_count = 0
for position, position_group in raw_votes.items():
voter_count = sum(1 for _ in _iter_voters(position_group))
if position in ("Yea", "Aye"):
yea_count += voter_count
elif position in ("Nay", "No"):
nay_count += voter_count
elif position == "Not Voting":
not_voting_count += voter_count
elif position == "Present":
present_count += voter_count
return {
"yea_count": yea_count,
"nay_count": nay_count,
"not_voting_count": not_voting_count,
"present_count": present_count,
}
def _build_vote_records(raw_votes: dict, legislator_map: dict[str, int]) -> list[VoteRecord]:
"""Build VoteRecord objects from raw vote data."""
records: list[VoteRecord] = []
for position, position_group in raw_votes.items():
for voter in _iter_voters(position_group):
bioguide_id = voter.get("id")
if not bioguide_id:
continue
legislator_id = legislator_map.get(bioguide_id)
if legislator_id is None:
continue
records.append(
VoteRecord(
legislator_id=legislator_id,
position=position,
)
)
return records
# ---------------------------------------------------------------------------
# Bill Text
# ---------------------------------------------------------------------------
def ingest_bill_text(engine: Engine, congress_dirs: list[Path]) -> None:
"""Load bill text from text-versions directories."""
with Session(engine) as session:
bill_map = _build_bill_map(session)
logger.info("Loaded %d bills into lookup map", len(bill_map))
existing_bill_texts = {
(bill_text.bill_id, bill_text.version_code) for bill_text in session.scalars(select(BillText)).all()
}
logger.info("Found %d existing bill text versions in DB", len(existing_bill_texts))
total_inserted = 0
batch: list[BillText] = []
for congress_dir in congress_dirs:
logger.info("Scanning bill texts from %s", congress_dir.name)
for bill_text in _iter_bill_texts(congress_dir, bill_map, existing_bill_texts):
batch.append(bill_text)
if len(batch) >= BATCH_SIZE:
total_inserted += _flush_batch(session, batch, "bill texts")
total_inserted += _flush_batch(session, batch, "bill texts")
logger.info("Inserted %d new bill text versions total", total_inserted)
def _iter_bill_texts(
congress_dir: Path,
bill_map: dict[tuple[int, str, int], int],
existing_bill_texts: set[tuple[int, str]],
) -> Iterator[BillText]:
"""Yield BillText objects for a single congress directory, skipping existing."""
bills_dir = congress_dir / "bills"
if not bills_dir.is_dir():
return
for bill_dir in bills_dir.rglob("text-versions"):
if not bill_dir.is_dir():
continue
bill_key = _bill_key_from_dir(bill_dir.parent, congress_dir)
if bill_key is None:
continue
bill_id = bill_map.get(bill_key)
if bill_id is None:
continue
for version_dir in sorted(bill_dir.iterdir()):
if not version_dir.is_dir():
continue
if (bill_id, version_dir.name) in existing_bill_texts:
continue
text_content = _read_bill_text(version_dir)
version_data = _read_json(version_dir / "data.json")
yield BillText(
bill_id=bill_id,
version_code=version_dir.name,
version_name=version_data.get("version_name") if version_data else None,
date=version_data.get("issued_on") if version_data else None,
text_content=text_content,
)
def _bill_key_from_dir(bill_dir: Path, congress_dir: Path) -> tuple[int, str, int] | None:
"""Extract (congress, bill_type, number) from directory structure."""
congress = int(congress_dir.name)
bill_type = bill_dir.parent.name
name = bill_dir.name
# Directory name is like "hr3590" — strip the type prefix to get the number
number_str = name[len(bill_type) :]
if not number_str.isdigit():
return None
return (congress, bill_type, int(number_str))
def _read_bill_text(version_dir: Path) -> str | None:
"""Read bill text from a version directory, preferring .txt over .xml."""
for extension in ("txt", "htm", "html", "xml"):
candidates = list(version_dir.glob(f"document.{extension}"))
if not candidates:
candidates = list(version_dir.glob(f"*.{extension}"))
if candidates:
try:
return candidates[0].read_text(encoding="utf-8")
except Exception:
logger.exception("Failed to read %s", candidates[0])
return None
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _read_json(path: Path) -> dict | None:
"""Read and parse a JSON file, returning None on failure."""
try:
return orjson.loads(path.read_bytes())
except FileNotFoundError:
return None
except Exception:
logger.exception("Failed to parse %s", path)
return None
if __name__ == "__main__":
app()

View File

@@ -1,14 +0,0 @@
"""init."""
from python.orm.data_science_dev.congress.bill import Bill, BillText
from python.orm.data_science_dev.congress.legislator import Legislator, LegislatorSocialMedia
from python.orm.data_science_dev.congress.vote import Vote, VoteRecord
__all__ = [
"Bill",
"BillText",
"Legislator",
"LegislatorSocialMedia",
"Vote",
"VoteRecord",
]

View File

@@ -1,66 +0,0 @@
"""Bill model - legislation introduced in Congress."""
from __future__ import annotations
from datetime import date
from typing import TYPE_CHECKING
from sqlalchemy import ForeignKey, Index, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column, relationship
from python.orm.data_science_dev.base import DataScienceDevTableBase
if TYPE_CHECKING:
from python.orm.data_science_dev.congress.vote import Vote
class Bill(DataScienceDevTableBase):
"""Legislation with congress number, type, titles, status, and sponsor."""
__tablename__ = "bill"
congress: Mapped[int]
bill_type: Mapped[str]
number: Mapped[int]
title: Mapped[str | None]
title_short: Mapped[str | None]
official_title: Mapped[str | None]
status: Mapped[str | None]
status_at: Mapped[date | None]
sponsor_bioguide_id: Mapped[str | None]
subjects_top_term: Mapped[str | None]
votes: Mapped[list[Vote]] = relationship(
"Vote",
back_populates="bill",
)
bill_texts: Mapped[list[BillText]] = relationship(
"BillText",
back_populates="bill",
cascade="all, delete-orphan",
)
__table_args__ = (
UniqueConstraint("congress", "bill_type", "number", name="uq_bill_congress_type_number"),
Index("ix_bill_congress", "congress"),
)
class BillText(DataScienceDevTableBase):
"""Stores different text versions of a bill (introduced, enrolled, etc.)."""
__tablename__ = "bill_text"
bill_id: Mapped[int] = mapped_column(ForeignKey("main.bill.id", ondelete="CASCADE"))
version_code: Mapped[str]
version_name: Mapped[str | None]
text_content: Mapped[str | None]
date: Mapped[date | None]
bill: Mapped[Bill] = relationship("Bill", back_populates="bill_texts")
__table_args__ = (UniqueConstraint("bill_id", "version_code", name="uq_bill_text_bill_id_version_code"),)

View File

@@ -1,66 +0,0 @@
"""Legislator model - members of Congress."""
from __future__ import annotations
from datetime import date
from typing import TYPE_CHECKING
from sqlalchemy import ForeignKey, Text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from python.orm.data_science_dev.base import DataScienceDevTableBase
if TYPE_CHECKING:
from python.orm.data_science_dev.congress.vote import VoteRecord
class Legislator(DataScienceDevTableBase):
"""Members of Congress with identification and current term info."""
__tablename__ = "legislator"
bioguide_id: Mapped[str] = mapped_column(Text, unique=True, index=True)
thomas_id: Mapped[str | None]
lis_id: Mapped[str | None]
govtrack_id: Mapped[int | None]
opensecrets_id: Mapped[str | None]
fec_ids: Mapped[str | None]
first_name: Mapped[str]
last_name: Mapped[str]
official_full_name: Mapped[str | None]
nickname: Mapped[str | None]
birthday: Mapped[date | None]
gender: Mapped[str | None]
current_party: Mapped[str | None]
current_state: Mapped[str | None]
current_district: Mapped[int | None]
current_chamber: Mapped[str | None]
social_media_accounts: Mapped[list[LegislatorSocialMedia]] = relationship(
"LegislatorSocialMedia",
back_populates="legislator",
cascade="all, delete-orphan",
)
vote_records: Mapped[list[VoteRecord]] = relationship(
"VoteRecord",
back_populates="legislator",
cascade="all, delete-orphan",
)
class LegislatorSocialMedia(DataScienceDevTableBase):
"""Social media account linked to a legislator."""
__tablename__ = "legislator_social_media"
legislator_id: Mapped[int] = mapped_column(ForeignKey("main.legislator.id"))
platform: Mapped[str]
account_name: Mapped[str]
url: Mapped[str | None]
source: Mapped[str]
legislator: Mapped[Legislator] = relationship(back_populates="social_media_accounts")

View File

@@ -1,79 +0,0 @@
"""Vote model - roll call votes in Congress."""
from __future__ import annotations
from datetime import date
from typing import TYPE_CHECKING
from sqlalchemy import ForeignKey, Index, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column, relationship
from python.orm.data_science_dev.base import DataScienceDevBase, DataScienceDevTableBase
if TYPE_CHECKING:
from python.orm.data_science_dev.congress.bill import Bill
from python.orm.data_science_dev.congress.legislator import Legislator
from python.orm.data_science_dev.congress.vote import Vote
class VoteRecord(DataScienceDevBase):
"""Links a vote to a legislator with their position (Yea, Nay, etc.)."""
__tablename__ = "vote_record"
vote_id: Mapped[int] = mapped_column(
ForeignKey("main.vote.id", ondelete="CASCADE"),
primary_key=True,
)
legislator_id: Mapped[int] = mapped_column(
ForeignKey("main.legislator.id", ondelete="CASCADE"),
primary_key=True,
)
position: Mapped[str]
vote: Mapped[Vote] = relationship("Vote", back_populates="vote_records")
legislator: Mapped[Legislator] = relationship("Legislator", back_populates="vote_records")
class Vote(DataScienceDevTableBase):
"""Roll call votes with counts and optional bill linkage."""
__tablename__ = "vote"
congress: Mapped[int]
chamber: Mapped[str]
session: Mapped[int]
number: Mapped[int]
vote_type: Mapped[str | None]
question: Mapped[str | None]
result: Mapped[str | None]
result_text: Mapped[str | None]
vote_date: Mapped[date]
yea_count: Mapped[int | None]
nay_count: Mapped[int | None]
not_voting_count: Mapped[int | None]
present_count: Mapped[int | None]
bill_id: Mapped[int | None] = mapped_column(ForeignKey("main.bill.id"))
bill: Mapped[Bill | None] = relationship("Bill", back_populates="votes")
vote_records: Mapped[list[VoteRecord]] = relationship(
"VoteRecord",
back_populates="vote",
cascade="all, delete-orphan",
)
__table_args__ = (
UniqueConstraint(
"congress",
"chamber",
"session",
"number",
name="uq_vote_congress_chamber_session_number",
),
Index("ix_vote_date", "vote_date"),
Index("ix_vote_congress_chamber", "congress", "chamber"),
)

View File

@@ -2,15 +2,9 @@
from __future__ import annotations
from python.orm.data_science_dev.congress import Bill, BillText, Legislator, Vote, VoteRecord
from python.orm.data_science_dev.posts import partitions # noqa: F401 — registers partition classes in metadata
from python.orm.data_science_dev.posts.tables import Posts
__all__ = [
"Bill",
"BillText",
"Legislator",
"Posts",
"Vote",
"VoteRecord",
]

View File

@@ -3,6 +3,7 @@
from __future__ import annotations
from python.orm.richie.base import RichieBase, TableBase, TableBaseBig, TableBaseSmall
from python.orm.richie.congress import Bill, Legislator, Vote, VoteRecord
from python.orm.richie.contact import (
Contact,
ContactNeed,
@@ -12,13 +13,17 @@ from python.orm.richie.contact import (
)
__all__ = [
"Bill",
"Contact",
"ContactNeed",
"ContactRelationship",
"Legislator",
"Need",
"RelationshipType",
"RichieBase",
"TableBase",
"TableBaseBig",
"TableBaseSmall",
"Vote",
"VoteRecord",
]

View File

@@ -0,0 +1,150 @@
"""Congress Tracker database models."""
from __future__ import annotations
from datetime import date
from sqlalchemy import ForeignKey, Index, Text, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column, relationship
from python.orm.richie.base import RichieBase, TableBase
class Legislator(TableBase):
"""Legislator model - members of Congress."""
__tablename__ = "legislator"
# Natural key - bioguide ID is the authoritative identifier
bioguide_id: Mapped[str] = mapped_column(Text, unique=True, index=True)
# Other IDs for cross-referencing
thomas_id: Mapped[str | None]
lis_id: Mapped[str | None]
govtrack_id: Mapped[int | None]
opensecrets_id: Mapped[str | None]
fec_ids: Mapped[str | None] # JSON array stored as string
# Name info
first_name: Mapped[str]
last_name: Mapped[str]
official_full_name: Mapped[str | None]
nickname: Mapped[str | None]
# Bio
birthday: Mapped[date | None]
gender: Mapped[str | None] # M/F
# Current term info (denormalized for query efficiency)
current_party: Mapped[str | None]
current_state: Mapped[str | None]
current_district: Mapped[int | None] # House only
current_chamber: Mapped[str | None] # rep/sen
# Relationships
vote_records: Mapped[list[VoteRecord]] = relationship(
"VoteRecord",
back_populates="legislator",
cascade="all, delete-orphan",
)
class Bill(TableBase):
"""Bill model - legislation introduced in Congress."""
__tablename__ = "bill"
# Composite natural key: congress + bill_type + number
congress: Mapped[int]
bill_type: Mapped[str] # hr, s, hres, sres, hjres, sjres
number: Mapped[int]
# Bill info
title: Mapped[str | None]
title_short: Mapped[str | None]
official_title: Mapped[str | None]
# Status
status: Mapped[str | None]
status_at: Mapped[date | None]
# Sponsor
sponsor_bioguide_id: Mapped[str | None]
# Subjects
subjects_top_term: Mapped[str | None]
# Relationships
votes: Mapped[list[Vote]] = relationship(
"Vote",
back_populates="bill",
)
__table_args__ = (
UniqueConstraint("congress", "bill_type", "number", name="uq_bill_congress_type_number"),
Index("ix_bill_congress", "congress"),
)
class Vote(TableBase):
"""Vote model - roll call votes in Congress."""
__tablename__ = "vote"
# Composite natural key: congress + chamber + session + number
congress: Mapped[int]
chamber: Mapped[str] # house/senate
session: Mapped[int]
number: Mapped[int]
# Vote details
vote_type: Mapped[str | None]
question: Mapped[str | None]
result: Mapped[str | None]
result_text: Mapped[str | None]
# Timing
vote_date: Mapped[date]
# Vote counts (denormalized for efficiency)
yea_count: Mapped[int | None]
nay_count: Mapped[int | None]
not_voting_count: Mapped[int | None]
present_count: Mapped[int | None]
# Related bill (optional - not all votes are on bills)
bill_id: Mapped[int | None] = mapped_column(ForeignKey("main.bill.id"))
# Relationships
bill: Mapped[Bill | None] = relationship("Bill", back_populates="votes")
vote_records: Mapped[list[VoteRecord]] = relationship(
"VoteRecord",
back_populates="vote",
cascade="all, delete-orphan",
)
__table_args__ = (
UniqueConstraint("congress", "chamber", "session", "number", name="uq_vote_congress_chamber_session_number"),
Index("ix_vote_date", "vote_date"),
Index("ix_vote_congress_chamber", "congress", "chamber"),
)
class VoteRecord(RichieBase):
"""Association table: Vote <-> Legislator with position."""
__tablename__ = "vote_record"
vote_id: Mapped[int] = mapped_column(
ForeignKey("main.vote.id", ondelete="CASCADE"),
primary_key=True,
)
legislator_id: Mapped[int] = mapped_column(
ForeignKey("main.legislator.id", ondelete="CASCADE"),
primary_key=True,
)
position: Mapped[str] # Yea, Nay, Not Voting, Present
# Relationships
vote: Mapped[Vote] = relationship("Vote", back_populates="vote_records")
legislator: Mapped[Legislator] = relationship("Legislator", back_populates="vote_records")
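Because vote_record keys on (vote_id, legislator_id), per-member positions join cleanly through the relationships above. A minimal query sketch against these models; the DSN and bioguide ID are placeholders, and the session wiring is an assumption:

from sqlalchemy import create_engine, select
from sqlalchemy.orm import Session

engine = create_engine("postgresql+psycopg://localhost/example")  # hypothetical DSN

with Session(engine) as session:
    # Every recorded position for one member during the 118th Congress.
    stmt = (
        select(Vote.vote_date, Vote.question, VoteRecord.position)
        .join(Vote.vote_records)
        .join(VoteRecord.legislator)
        .where(Legislator.bioguide_id == "B000000", Vote.congress == 118)
        .order_by(Vote.vote_date)
    )
    for vote_date, question, position in session.execute(stmt):
        print(vote_date, position, question)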

View File

@@ -63,9 +63,9 @@ class DeviceRegistry:
return
with Session(self.engine) as session:
device = session.scalars(
device = session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).one_or_none()
).scalar_one_or_none()
if device:
if device.safety_number != safety_number and device.trust_level != TrustLevel.BLOCKED:
@@ -99,9 +99,9 @@ class DeviceRegistry:
Returns True if the device was found and verified.
"""
with Session(self.engine) as session:
device = session.scalars(
device = session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).one_or_none()
).scalar_one_or_none()
if not device:
logger.warning(f"Cannot verify unknown device: {phone_number}")
@@ -139,9 +139,9 @@ class DeviceRegistry:
def grant_role(self, phone_number: str, role: Role) -> bool:
"""Add a role to a device. Called by admin over SSH."""
with Session(self.engine) as session:
device = session.scalars(
device = session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).one_or_none()
).scalar_one_or_none()
if not device:
logger.warning(f"Cannot grant role for unknown device: {phone_number}")
@@ -150,7 +150,7 @@ class DeviceRegistry:
if any(record.name == role for record in device.roles):
return True
role_record = session.scalars(select(RoleRecord).where(RoleRecord.name == role)).one_or_none()
role_record = session.execute(select(RoleRecord).where(RoleRecord.name == role)).scalar_one_or_none()
if not role_record:
logger.warning(f"Unknown role: {role}")
@@ -165,9 +165,9 @@ class DeviceRegistry:
def revoke_role(self, phone_number: str, role: Role) -> bool:
"""Remove a role from a device. Called by admin over SSH."""
with Session(self.engine) as session:
device = session.scalars(
device = session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).one_or_none()
).scalar_one_or_none()
if not device:
logger.warning(f"Cannot revoke role for unknown device: {phone_number}")
@@ -182,16 +182,16 @@ class DeviceRegistry:
def set_roles(self, phone_number: str, roles: list[Role]) -> bool:
"""Replace all roles for a device. Called by admin over SSH."""
with Session(self.engine) as session:
device = session.scalars(
device = session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).one_or_none()
).scalar_one_or_none()
if not device:
logger.warning(f"Cannot set roles for unknown device: {phone_number}")
return False
role_names = [str(role) for role in roles]
records = session.scalars(select(RoleRecord).where(RoleRecord.name.in_(role_names))).all()
records = list(session.execute(select(RoleRecord).where(RoleRecord.name.in_(role_names))).scalars().all())
device.roles = records
session.commit()
self._update_cache(phone_number, device)
@@ -203,7 +203,7 @@ class DeviceRegistry:
def list_devices(self) -> list[SignalDevice]:
"""Return all known devices."""
with Session(self.engine) as session:
return list(session.scalars(select(SignalDevice)).all())
return list(session.execute(select(SignalDevice)).scalars().all())
def sync_identities(self) -> None:
"""Pull identity list from signal-cli and record any new ones."""
@@ -226,7 +226,9 @@ class DeviceRegistry:
def _load_device(self, phone_number: str) -> SignalDevice | None:
"""Fetch a device by phone number (with joined roles)."""
with Session(self.engine) as session:
return session.scalars(select(SignalDevice).where(SignalDevice.phone_number == phone_number)).one_or_none()
return session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).scalar_one_or_none()
def _update_cache(self, phone_number: str, device: SignalDevice) -> None:
"""Refresh the cache entry for a device."""
@@ -242,9 +244,9 @@ class DeviceRegistry:
def _set_trust(self, phone_number: str, level: str, log_msg: str | None = None) -> bool:
"""Update the trust level for a device."""
with Session(self.engine) as session:
device = session.scalars(
device = session.execute(
select(SignalDevice).where(SignalDevice.phone_number == phone_number)
).one_or_none()
).scalar_one_or_none()
if not device:
return False
@@ -267,7 +269,7 @@ def sync_roles(engine: Engine) -> None:
expected = {role.value for role in Role}
with Session(engine) as session:
existing = set(session.scalars(select(RoleRecord.name)).all())
existing = {record.name for record in session.execute(select(RoleRecord)).scalars().all()}
to_add = expected - existing
to_remove = existing - expected
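The recurring edit in this file swaps session.scalars(...).one_or_none() for session.execute(...).scalar_one_or_none(); in SQLAlchemy 2.0 both spellings return the same single ORM object or None for a one-entity select. A self-contained sketch of the equivalence (the User model and the sqlite engine are illustrative only):

from sqlalchemy import create_engine, select
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class User(Base):
    __tablename__ = "user"
    id: Mapped[int] = mapped_column(primary_key=True)
    name: Mapped[str]

engine = create_engine("sqlite://")  # throwaway in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(User(name="richie"))
    session.commit()
    # Spelling used before this change:
    old = session.scalars(select(User).where(User.name == "richie")).one_or_none()
    # Spelling this change standardizes on:
    new = session.execute(select(User).where(User.name == "richie")).scalar_one_or_none()
    assert old is new  # same identity-mapped object either way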

View File

@@ -34,9 +34,8 @@ def main(config_file: Path) -> None:
logger.error(msg)
signal_alert(msg)
continue
count_lookup = get_count_lookup(config_file, dataset.name)
logger.info(f"using {count_lookup} for {dataset.name}")
get_snapshots_to_delete(dataset, count_lookup)
get_snapshots_to_delete(dataset, get_count_lookup(config_file, dataset.name))
except Exception:
logger.exception("snapshot_manager failed")
signal_alert("snapshot_manager failed")
@@ -100,7 +99,6 @@ def get_snapshots_to_delete(
"""
snapshots = dataset.get_snapshots()
logger.info(f"calculating snapshots for {dataset.name} to be deleted")
if not snapshots:
logger.info(f"{dataset.name} has no snapshots")
return

View File

@@ -1,9 +1,9 @@
{ inputs, pkgs, ... }:
{ inputs, ... }:
{
imports = [
"${inputs.self}/users/richie"
"${inputs.self}/users/math"
"${inputs.self}/common/global"
"${inputs.self}/common/optional/desktop.nix"
"${inputs.self}/common/optional/docker.nix"
"${inputs.self}/common/optional/scanner.nix"
"${inputs.self}/common/optional/steam.nix"
@@ -19,11 +19,6 @@
./llms.nix
];
boot = {
kernelPackages = pkgs.linuxPackages_6_18;
zfs.package = pkgs.zfs_2_4;
};
networking = {
hostName = "bob";
hostId = "7c678a41";

View File

@@ -23,7 +23,6 @@
"magistral:24b"
"ministral-3:14b"
"nemotron-3-nano:30b"
"nemotron-3-nano:4b"
"nemotron-cascade-2:30b"
"qwen3-coder:30b"
"qwen3-embedding:0.6b"

View File

@@ -15,7 +15,6 @@ in
"${inputs.self}/common/optional/zerotier.nix"
./docker
./services
./web_services
./hardware.nix
./networking.nix
./programs.nix

View File

@@ -21,8 +21,7 @@ sudo zfs create media/secure/docker -o compression=zstd-9
sudo zfs create media/secure/github-runners -o compression=zstd-9 -o sync=disabled
sudo zfs create media/secure/home_assistant -o compression=zstd-19
sudo zfs create media/secure/notes -o copies=2
sudo zfs create media/secure/postgres -o mountpoint=/zfs/media/database/postgres -o recordsize=16k -o primarycache=metadata
sudo zfs create media/secure/postgres-wal -o mountpoint=/zfs/media/database/postgres-wal -o recordsize=32k -o primarycache=metadata -o special_small_blocks=32K -o compression=lz4 -o secondarycache=none -o logbias=latency
sudo zfs create media/secure/postgres -o recordsize=16k -o primarycache=metadata
sudo zfs create media/secure/services -o compression=zstd-9
sudo zfs create media/secure/share -o mountpoint=/zfs/media/share -o exec=off

View File

@@ -0,0 +1,96 @@
{
pkgs,
inputs,
...
}:
let
commonEnv = {
PYTHONPATH = "${inputs.self}";
KAFKA_BOOTSTRAP_SERVERS = "localhost:9092";
BLUESKY_FIREHOSE_TOPIC = "bluesky.firehose.posts";
};
commonServiceConfig = {
Type = "simple";
WorkingDirectory = "${inputs.self}";
User = "richie";
Group = "users";
Restart = "on-failure";
RestartSec = "10s";
StandardOutput = "journal";
StandardError = "journal";
NoNewPrivileges = true;
ProtectSystem = "strict";
ProtectHome = "read-only";
PrivateTmp = true;
ReadOnlyPaths = [ "${inputs.self}" ];
};
in
{
systemd.services.bluesky-firehose-topic-init = {
description = "Create Kafka topic for Bluesky firehose";
after = [ "apache-kafka.service" ];
requires = [ "apache-kafka.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStart = pkgs.writeShellScript "create-bluesky-topic" ''
${pkgs.apacheKafka}/bin/kafka-topics.sh \
--bootstrap-server localhost:9092 \
--create \
--if-not-exists \
--topic bluesky.firehose.posts \
--partitions 6 \
--replication-factor 1
'';
};
};
systemd.services.bluesky-firehose-producer = {
description = "Bluesky Jetstream to Kafka producer";
after = [
"network.target"
"apache-kafka.service"
"bluesky-firehose-topic-init.service"
];
requires = [
"apache-kafka.service"
"bluesky-firehose-topic-init.service"
];
wantedBy = [ "multi-user.target" ];
environment = commonEnv;
serviceConfig = commonServiceConfig // {
ExecStart = "${pkgs.my_python}/bin/python -m python.data_science.firehose_producer";
};
};
systemd.services.bluesky-firehose-consumer = {
description = "Bluesky Kafka to PostgreSQL consumer";
after = [
"network.target"
"apache-kafka.service"
"bluesky-firehose-topic-init.service"
"postgresql.service"
];
requires = [
"apache-kafka.service"
"bluesky-firehose-topic-init.service"
"postgresql.service"
];
wantedBy = [ "multi-user.target" ];
environment = commonEnv // {
DATA_SCIENCE_DEV_DB = "data_science_dev";
DATA_SCIENCE_DEV_USER = "richie";
DATA_SCIENCE_DEV_HOST = "/run/postgresql";
DATA_SCIENCE_DEV_PORT = "5432";
};
serviceConfig = commonServiceConfig // {
ExecStart = "${pkgs.my_python}/bin/python -m python.data_science.firehose_consumer";
};
};
}
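The ExecStart targets point at producer and consumer modules that are outside this diff. As a rough sketch of the producer half, using the confluent-kafka package added to the Python environment elsewhere in this change set; only the env var names and the topic come from the units above, the rest is assumed:

import os

from confluent_kafka import Producer

producer = Producer({"bootstrap.servers": os.environ["KAFKA_BOOTSTRAP_SERVERS"]})
topic = os.environ["BLUESKY_FIREHOSE_TOPIC"]

def _on_delivery(err, msg) -> None:
    if err is not None:
        print(f"delivery failed: {err}")

def publish(post: bytes, key: str) -> None:
    """Queue one post; poll(0) services delivery callbacks without blocking."""
    producer.produce(topic, value=post, key=key, on_delivery=_on_delivery)
    producer.poll(0)

# On shutdown, producer.flush() drains anything still queued.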

View File

@@ -0,0 +1,17 @@
{ pkgs, ... }:
let
vars = import ../vars.nix;
in
{
systemd.services.cloud_flare_tunnel = {
description = "cloud_flare_tunnel proxy's traffic through cloudflare";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
EnvironmentFile = "${vars.secrets}/docker/cloud_flare_tunnel";
ExecStart = "${pkgs.cloudflared}/bin/cloudflared --no-autoupdate tunnel run";
Restart = "on-failure";
};
};
}

View File

@@ -2,10 +2,7 @@ let
vars = import ../vars.nix;
in
{
networking.firewall.allowedTCPPorts = [
6443
2223
];
networking.firewall.allowedTCPPorts = [ 6443 ];
services.gitea = {
enable = true;
@@ -27,7 +24,7 @@ in
ROOT_URL = "https://gitea.tmmworkshop.com/";
HTTP_PORT = 6443;
SSH_PORT = 2223;
SSH_LISTEN_PORT = 2223;
SSH_LISTEN_PORT = 2224;
START_SSH_SERVER = true;
PUBLIC_URL_DETECTION = "auto";
};

View File

@@ -6,7 +6,6 @@ global
defaults
log global
mode http
option httplog
retries 3
maxconn 2000
timeout connect 5s
@@ -23,37 +22,25 @@ defaults
# Application Setup
frontend ContentSwitching
bind *:80 v4v6
bind *:443 v4v6 ssl crt /var/lib/acme/audiobookshelf.tmmworkshop.com/full.pem crt /var/lib/acme/cache.tmmworkshop.com/full.pem crt /var/lib/acme/jellyfin.tmmworkshop.com/full.pem crt /var/lib/acme/share.tmmworkshop.com/full.pem crt /var/lib/acme/gitea.tmmworkshop.com/full.pem
bind *:443 v4v6 ssl crt /zfs/storage/secrets/docker/cloudflare.pem
mode http
# ACME challenge routing (must be first)
acl is_acme path_beg /.well-known/acme-challenge/
use_backend acme_challenge if is_acme
# tmmworkshop.com
acl host_audiobookshelf hdr(host) -i audiobookshelf.tmmworkshop.com
acl host_cache hdr(host) -i cache.tmmworkshop.com
acl host_jellyfin hdr(host) -i jellyfin.tmmworkshop.com
acl host_share hdr(host) -i share.tmmworkshop.com
acl host_gcw hdr(host) -i gcw.tmmworkshop.com
acl host_n8n hdr(host) -i n8n.tmmworkshop.com
acl host_gitea hdr(host) -i gitea.tmmworkshop.com
# Hosts allowed to serve plain HTTP (add entries to skip the HTTPS redirect)
acl allow_http hdr(host) -i __none__
# acl allow_http hdr(host) -i example.tmmworkshop.com
# Redirect all HTTP to HTTPS unless on the allow list or ACME challenge
http-request redirect scheme https code 301 if !{ ssl_fc } !allow_http !is_acme
use_backend audiobookshelf_nodes if host_audiobookshelf
use_backend cache_nodes if host_cache
use_backend jellyfin if host_jellyfin
use_backend share_nodes if host_share
use_backend gcw_nodes if host_gcw
use_backend n8n if host_n8n
use_backend gitea if host_gitea
backend acme_challenge
mode http
server acme 127.0.0.1:8402
backend audiobookshelf_nodes
mode http
server server 127.0.0.1:8000
@@ -73,6 +60,14 @@ backend share_nodes
mode http
server server 127.0.0.1:8091
backend gcw_nodes
mode http
server server 127.0.0.1:8092
backend n8n
mode http
server server 127.0.0.1:5678
backend gitea
mode http
server server 127.0.0.1:6443
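One hedged way to smoke-test the frontend rules from the proxy host: send plain-HTTP requests with each Host header and confirm the 301 redirect to HTTPS fires. The hostnames and the redirect rule come from the config above; the script itself is illustrative:

import urllib.error
import urllib.request

class NoRedirect(urllib.request.HTTPRedirectHandler):
    # Surface 3xx responses as HTTPError instead of following them.
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        return None

opener = urllib.request.build_opener(NoRedirect)

for host in ("gitea.tmmworkshop.com", "n8n.tmmworkshop.com", "gcw.tmmworkshop.com"):
    req = urllib.request.Request("http://127.0.0.1/", headers={"Host": host})
    try:
        opener.open(req, timeout=5)
        print(host, "served over plain HTTP (only expected for allow_http hosts)")
    except urllib.error.HTTPError as err:
        print(host, err.code)  # expect 301 for hosts not on the allow list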

View File

@@ -1,24 +0,0 @@
{
services.hedgedoc = {
enable = true;
settings = {
host = "0.0.0.0";
port = 3000;
domain = "192.168.90.40";
urlAddPort = true;
protocolUseSSL = false;
db = {
dialect = "postgres";
database = "hedgedoc";
username = "hedgedoc";
host = "/run/postgresql";
};
};
};
networking.firewall.allowedTCPPorts = [ 3000 ];
systemd.services.hedgedoc = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
}

View File

@@ -7,13 +7,6 @@ in
settings = {
listeners = [ "PLAINTEXT://localhost:9092" ];
"log.dirs" = [ vars.kafka ];
"num.partitions" = 6;
"default.replication.factor" = 1;
"log.retention.hours" = 168;
"log.retention.bytes" = 10737418240;
"log.segment.bytes" = 1073741824;
"log.cleanup.policy" = "delete";
"auto.create.topics.enable" = false;
};
};
}

View File

@@ -5,10 +5,6 @@ in
{
networking.firewall.allowedTCPPorts = [ 5432 ];
# Symlink pg_wal to a ZFS dataset on the special (metadata) vdev for fast WAL writes
# this is required for systemd sandboxing
systemd.services.postgresql.serviceConfig.ReadWritePaths = [ "/zfs/media/database/postgres-wal" ];
services.postgresql = {
enable = true;
package = pkgs.postgresql_17_jit;
@@ -37,9 +33,6 @@ in
# signalbot
local signalbot signalbot trust
# hedgedoc
local hedgedoc hedgedoc trust
# math
local postgres math trust
host postgres math 127.0.0.1/32 trust
@@ -119,19 +112,11 @@ in
login = true;
};
}
{
name = "hedgedoc";
ensureDBOwnership = true;
ensureClauses = {
login = true;
};
}
];
ensureDatabases = [
"data_science_dev"
"hass"
"gitea"
"hedgedoc"
"math"
"n8n"
"richie"

View File

@@ -1,6 +1,7 @@
zpool = ["root_pool", "storage", "media"]
services = [
"audiobookshelf",
"cloud_flare_tunnel",
"haproxy",
"docker",
"home-assistant",

View File

@@ -4,7 +4,6 @@ hourly = 24
daily = 0
monthly = 0
# root_pool
["root_pool/home"]
15_min = 8
hourly = 24
@@ -28,96 +27,57 @@ monthly = 0
hourly = 24
daily = 30
monthly = 6
# storage
["storage/ollama"]
15_min = 2
hourly = 0
daily = 0
monthly = 0
["storage/secure"]
15_min = 0
hourly = 0
daily = 0
monthly = 0
["storage/secure/plex"]
["storage/plex"]
15_min = 6
hourly = 2
daily = 1
monthly = 0
["storage/secure/transmission"]
15_min = 4
hourly = 0
daily = 0
["media/plex"]
15_min = 6
hourly = 2
daily = 1
monthly = 0
["storage/secure/secrets"]
["media/notes"]
15_min = 8
hourly = 24
daily = 30
monthly = 12
# media
["media/temp"]
15_min = 2
hourly = 0
daily = 0
monthly = 0
["media/secure"]
15_min = 0
hourly = 0
daily = 0
monthly = 0
["media/secure/plex"]
15_min = 6
hourly = 2
daily = 1
monthly = 0
["media/secure/postgres-wal"]
15_min = 4
hourly = 2
daily = 0
monthly = 0
["media/secure/postgres"]
15_min = 8
hourly = 24
daily = 7
monthly = 0
["media/secure/share"]
15_min = 4
hourly = 0
daily = 0
monthly = 0
["media/secure/github-runners"]
15_min = 6
hourly = 2
daily = 1
monthly = 0
["media/secure/notes"]
15_min = 8
hourly = 24
daily = 30
monthly = 12
["media/secure/docker"]
["media/docker"]
15_min = 3
hourly = 12
daily = 14
monthly = 2
["media/services"]
15_min = 3
hourly = 12
daily = 14
monthly = 2
["media/home_assistant"]
15_min = 3
hourly = 12
daily = 14
monthly = 2
# scratch
["scratch/transmission"]
15_min = 2
15_min = 0
hourly = 0
daily = 0
monthly = 0
["storage/transmission"]
15_min = 0
hourly = 0
daily = 0
monthly = 0
["storage/ollama"]
15_min = 0
hourly = 0
daily = 0
monthly = 0

View File

@@ -1,62 +0,0 @@
let
domains = [
"audiobookshelf"
"cache"
"gitea"
"jellyfin"
"share"
];
makeCert = name: {
name = "${name}.tmmworkshop.com";
value = {
webroot = "/var/lib/acme/.challenges";
group = "acme";
reloadServices = [ "haproxy.service" ];
};
};
acmeServices = map (domain: "acme-${domain}.tmmworkshop.com.service") domains;
in
{
users.users.haproxy.extraGroups = [ "acme" ];
security.acme = {
acceptTerms = true;
defaults.email = "Richie@tmmworkshop.com";
certs = builtins.listToAttrs (map makeCert domains);
};
# Minimal nginx to serve ACME HTTP-01 challenge files for HAProxy
services.nginx = {
enable = true;
virtualHosts."acme-challenge" = {
listen = [
{
addr = "127.0.0.1";
port = 8402;
}
];
locations."/.well-known/acme-challenge/" = {
root = "/var/lib/acme/.challenges";
};
};
};
# Ensure the challenge directory exists with correct permissions
systemd.tmpfiles.rules = [
"d /var/lib/acme/.challenges 0750 acme acme - -"
"d /var/lib/acme/.challenges/.well-known 0750 acme acme - -"
"d /var/lib/acme/.challenges/.well-known/acme-challenge 0750 acme acme - -"
];
users.users.nginx.extraGroups = [ "acme" ];
# HAProxy needs certs to exist before it can bind :443.
# NixOS's acme module generates self-signed placeholders on first boot
# via acme-<domain>.service — just make HAProxy wait for them.
systemd.services.haproxy = {
after = acmeServices;
wants = acmeServices;
};
}

View File

@@ -1,9 +0,0 @@
{ lib, ... }:
{
imports =
let
files = builtins.attrNames (builtins.readDir ./.);
nixFiles = builtins.filter (name: lib.hasSuffix ".nix" name && name != "default.nix") files;
in
map (file: ./. + "/${file}") nixFiles;
}

View File

@@ -14,7 +14,6 @@
./llms.nix
./open_webui.nix
./qmk.nix
./sunshine.nix
./syncthing.nix
inputs.nixos-hardware.nixosModules.framework-13-7040-amd
];

View File

@@ -8,8 +8,9 @@
"deepscaler:1.5b"
"deepseek-r1:8b"
"gemma3:12b"
"gemma3:27b"
"gpt-oss:20b"
"lfm2:24b"
"nemotron-3-nano:4b"
"qwen3:14b"
"qwen3.5:27b"
];

View File

@@ -1,28 +0,0 @@
{ pkgs, ... }:
{
services.sunshine = {
enable = true;
openFirewall = true;
capSysAdmin = true;
};
environment.systemPackages = [ pkgs.kdePackages.libkscreen ];
boot = {
kernelParams = [
"drm.edid_firmware=DP-4:edid/virtual-display.bin"
"video=DP-4:e"
];
};
hardware.firmware = [
(pkgs.runCommandLocal "virtual-display-edid"
{
compressFirmware = false;
}
''
mkdir -p $out/lib/firmware/edid
cp ${./edid/virtual-display.bin} $out/lib/firmware/edid/virtual-display.bin
''
)
];
}

View File

@@ -210,9 +210,9 @@ class TestContactCache:
mock_session_cls.return_value.__exit__ = MagicMock(return_value=False)
mock_device = MagicMock()
mock_device.trust_level = TrustLevel.UNVERIFIED
mock_session.scalars.return_value.one_or_none.return_value = mock_device
mock_session.execute.return_value.scalar_one_or_none.return_value = mock_device
registry.record_contact("+1234", "abc")
mock_session.scalars.assert_called_once()
mock_session.execute.assert_called_once()
class TestLocationCommand:
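The updated stub mirrors the new call chain, with one .return_value hop per attribute access: execute, then scalar_one_or_none. A standalone illustration of the wiring (names here are illustrative, not from the test suite):

from unittest.mock import MagicMock

mock_session = MagicMock()
mock_device = MagicMock()
# One .return_value hop per link in execute(...).scalar_one_or_none():
mock_session.execute.return_value.scalar_one_or_none.return_value = mock_device

result = mock_session.execute("any select").scalar_one_or_none()
assert result is mock_device
mock_session.execute.assert_called_once()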

View File

@@ -1,5 +0,0 @@
{
imports = [
../home/global.nix
];
}

View File

@@ -20,15 +20,15 @@
// turns off all sounds and announcements
"accessibility.signals.terminalCommandFailed": {
"sound": "off",
"announcement": "off",
"announcement": "off"
},
"accessibility.signals.terminalQuickFix": {
"sound": "off",
"announcement": "off",
"announcement": "off"
},
"accessibility.signals.terminalBell": {
"sound": "off",
"announcement": "off",
"announcement": "off"
},
// database settings
@@ -41,8 +41,8 @@
"driver": "PostgreSQL",
"name": "main",
"database": "postgres",
"username": "richie",
},
"username": "richie"
}
],
// formatters
@@ -55,7 +55,7 @@
"[yaml]": { "editor.defaultFormatter": "redhat.vscode-yaml" },
"[javascriptreact]": { "editor.defaultFormatter": "esbenp.prettier-vscode" },
"[github-actions-workflow]": {
"editor.defaultFormatter": "redhat.vscode-yaml",
"editor.defaultFormatter": "redhat.vscode-yaml"
},
"[dockercompose]": {
"editor.insertSpaces": true,
@@ -64,9 +64,9 @@
"editor.quickSuggestions": {
"other": true,
"comments": false,
"strings": true,
"strings": true
},
"editor.defaultFormatter": "redhat.vscode-yaml",
"editor.defaultFormatter": "redhat.vscode-yaml"
},
// spell check
@@ -78,8 +78,7 @@
"Corvidae",
"drivername",
"fastapi",
"sandboxing",
"syncthing",
"syncthing"
],
// nix
@@ -97,5 +96,5 @@
// new
"hediet.vscode-drawio.resizeImages": null,
"hediet.vscode-drawio.appearance": "automatic",
"claudeCode.preferredLocation": "panel",
"claudeCode.preferredLocation": "panel"
}

View File

@@ -1,5 +1,6 @@
{
imports = [
../home/global.nix
../home/gui
];
}