feat(backend): refactor mono repository

2025-08-27 11:04:56 -04:00
parent d0dbba21fb
commit be1c729220
37 changed files with 2534 additions and 452 deletions

View File

@@ -1,22 +0,0 @@
# backend/database/models/__init__.py
from .classification import Classification
from .member import Member
from .servicetype import ServiceType
from .service import Service
from .serviceavailability import ServiceAvailability
from .schedule import Schedule
from .acceptedlog import AcceptedLog
from .declinelog import DeclineLog
from .scheduledlog import ScheduledLog
__all__ = [
"Classification",
"Member",
"ServiceType",
"Service",
"ServiceAvailability",
"Schedule",
"AcceptedLog",
"DeclineLog",
"ScheduledLog",
]

View File

@@ -1,49 +0,0 @@
from __future__ import annotations
from dataclasses import dataclass, asdict, fields
from datetime import date, datetime
from typing import Any, Dict, Tuple, Type, TypeVar, Union
Row = Tuple[Any, ...] | Dict[str, Any] # what sqlite3.Row returns
T = TypeVar("T", bound="BaseModel")
@dataclass()
class BaseModel:
"""A tiny helper that gives every model a common interface."""
@classmethod
def from_row(cls: Type[T], row: Row) -> T:
"""
Build a model instance from a sqlite3.Row (or a dict-like object).
Column names are matched to the dataclass field names.
"""
if isinstance(row, dict):
data = row
else: # sqlite3.Row behaves like a mapping, but we guard for safety
data = dict(row)
# Convert raw strings to proper Python types where we know the annotation
converted: Dict[str, Any] = {}
for f in fields(cls):
value = data.get(f.name)
if value is None:
converted[f.name] = None
continue
# datetime/date handling: SQLite returns str in ISO format
if f.type is datetime:
converted[f.name] = datetime.fromisoformat(value)
elif f.type is date:
converted[f.name] = date.fromisoformat(value)
else:
converted[f.name] = value
return cls(**converted) # type: ignore[arg-type]
def to_dict(self) -> Dict[str, Any]:
"""Return a plain dict (useful for INSERT/UPDATE statements)."""
return asdict(self)
def __repr__(self) -> str: # a nicer representation when printing
field_vals = ", ".join(f"{f.name}={getattr(self, f.name)!r}" for f in fields(self))
return f"{self.__class__.__name__}({field_vals})"

View File

@@ -1,11 +0,0 @@
from dataclasses import dataclass
from datetime import datetime
from ._base import BaseModel
@dataclass()
class AcceptedLog(BaseModel):
LogId: int
MemberId: int
ServiceId: int
AcceptedAt: datetime

View File

@@ -1,8 +0,0 @@
from dataclasses import dataclass
from ._base import BaseModel
@dataclass()
class Classification(BaseModel):
ClassificationId: int
ClassificationName: str

View File

@@ -1,14 +0,0 @@
from dataclasses import dataclass
from datetime import datetime, date
from typing import Optional
from ._base import BaseModel
@dataclass()
class DeclineLog(BaseModel):
DeclineId: int
MemberId: int
ServiceId: int
DeclinedAt: datetime
DeclineDate: date # the service day that was declined
Reason: Optional[str] = None

View File

@@ -1,20 +0,0 @@
from dataclasses import dataclass
from datetime import datetime, date
from typing import Optional
from ._base import BaseModel
@dataclass
class Member(BaseModel):
MemberId: int
FirstName: str
LastName: str
Email: Optional[str] = None
PhoneNumber: Optional[str] = None
ClassificationId: Optional[int] = None
Notes: Optional[str] = None
IsActive: int = 1
LastScheduledAt: Optional[datetime] = None
LastAcceptedAt: Optional[datetime] = None
LastDeclinedAt: Optional[datetime] = None
DeclineStreak: int = 0

View File

@@ -1,17 +0,0 @@
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from ._base import BaseModel
@dataclass
class Schedule(BaseModel):
ScheduleId: int
ServiceId: int
MemberId: int
Status: str # 'pending' | 'accepted' | 'declined'
ScheduledAt: datetime # renamed from OfferedAt
AcceptedAt: Optional[datetime] = None
DeclinedAt: Optional[datetime] = None
ExpiresAt: Optional[datetime] = None
DeclineReason: Optional[str] = None

View File

@@ -1,12 +0,0 @@
from dataclasses import dataclass
from datetime import datetime
from ._base import BaseModel
@dataclass()
class ScheduledLog(BaseModel):
LogId: int
MemberId: int
ServiceId: int
ScheduledAt: datetime
ExpiresAt: datetime

View File

@@ -1,10 +0,0 @@
from dataclasses import dataclass
from datetime import date
from ._base import BaseModel
@dataclass()
class Service(BaseModel):
ServiceId: int
ServiceTypeId: int
ServiceDate: date

View File

@@ -1,9 +0,0 @@
from dataclasses import dataclass
from ._base import BaseModel
@dataclass()
class ServiceAvailability(BaseModel):
ServiceAvailabilityId: int
MemberId: int
ServiceTypeId: int

View File

@@ -1,8 +0,0 @@
from dataclasses import dataclass
from ._base import BaseModel
@dataclass()
class ServiceType(BaseModel):
ServiceTypeId: int
TypeName: str

View File

@@ -1,3 +1,3 @@
# database/__init__.py
from .connection import DatabaseConnection
from .repository import Repository
from .base_repository import BaseRepository

View File

@@ -0,0 +1,109 @@
# backend/db/base_repository.py
from __future__ import annotations
from typing import TypeVar, Generic, List, Sequence, Any, Mapping, Tuple
from .connection import DatabaseConnection
# Generic type for the model (your dataclasses such as Member, Service, …)
T = TypeVar("T")
class BaseRepository(Generic[T]):
"""
Very small generic repository that knows how to:
* INSERT a dataclass-like object (any object that implements ``to_dict`` and
``from_row``)
* SELECT all rows from a table and turn them into model instances
* (optionally) UPDATE or DELETE rows; stubs are provided for future use
"""
def __init__(self, db: DatabaseConnection):
self.db = db
# ------------------------------------------------------------------
# INSERT
# ------------------------------------------------------------------
def _insert(self, table: str, obj: T, pk_name: str) -> T:
"""
Insert ``obj`` into ``table`` and populate the auto-increment primary-key
field named ``pk_name`` on the original object.
The model **must** implement:
* ``to_dict() -> Mapping[str, Any]``: returns a mapping of column →
value (including the PK, which we drop here)
* ``from_row(row: sqlite3.Row) -> Model``: classmethod used by
``_select_all``.
"""
# 1⃣ Turn the model into a plain dict and drop the PK column.
data: Mapping[str, Any] = obj.to_dict() # type: ignore[attr-defined]
if pk_name not in data:
raise ValueError(f"Primarykey column '{pk_name}' not found in model data.")
# Remove the autoincrement column SQLite will fill it in.
data_without_pk = {k: v for k, v in data.items() if k != pk_name}
# 2⃣ Build the column list and matching placeholders.
columns = ", ".join(data_without_pk.keys())
placeholders = ", ".join("?" for _ in data_without_pk)
sql = f"INSERT INTO {table} ({columns}) VALUES ({placeholders})"
# 3⃣ Execute the statement with a *tuple* of values that matches the
# number of placeholders.
cursor = self.db.execute(sql, tuple(data_without_pk.values()))
# 4⃣ SQLite gives us the newly generated rowid on the cursor.
setattr(obj, pk_name, cursor.lastrowid) # type: ignore[attr-defined]
return obj
# ------------------------------------------------------------------
# SELECT ALL
# ------------------------------------------------------------------
def _select_all(self, table: str, model_cls: type[T]) -> List[T]:
"""
Return every row from ``table`` as a list of ``model_cls`` instances.
``model_cls`` must provide a ``from_row`` classmethod that accepts a
``sqlite3.Row`` and returns an instantiated model.
"""
rows = self.db.fetchall(f"SELECT * FROM {table}")
return [model_cls.from_row(r) for r in rows] # type: ignore[attr-defined]
# ------------------------------------------------------------------
# OPTIONAL UPDATE helper (you can call it from concrete repos)
# ------------------------------------------------------------------
def _update(
self,
table: str,
pk_name: str,
pk_value: Any,
updates: Mapping[str, Any],
) -> None:
"""
Simple UPDATE helper.
Example:
repo._update(
table="Members",
pk_name="MemberId",
pk_value=42,
updates={"IsActive": 0, "Notes": "temporarily disabled"},
)
"""
if not updates:
return # nothing to do
set_clause = ", ".join(f"{col}=?" for col in updates)
sql = f"UPDATE {table} SET {set_clause} WHERE {pk_name} = ?"
params: Tuple[Any, ...] = tuple(updates.values()) + (pk_value,)
self.db.execute(sql, params)
# ------------------------------------------------------------------
# OPTIONAL DELETE helper
# ------------------------------------------------------------------
def _delete(self, table: str, pk_name: str, pk_value: Any) -> None:
"""
Delete a row by primary key.
"""
sql = f"DELETE FROM {table} WHERE {pk_name} = ?"
self.db.execute(sql, (pk_value,))
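A minimal sketch of how a concrete repository can build on these helpers. The ``Widgets`` table and ``Widget`` dataclass below are hypothetical placeholders, not part of this commit; the real repositories (``MemberRepository`` and friends) follow the same pattern:

from dataclasses import dataclass, asdict
from typing import Any, Dict, List

@dataclass
class Widget:
    WidgetId: int
    Name: str

    def to_dict(self) -> Dict[str, Any]:
        # required by BaseRepository._insert
        return asdict(self)

    @classmethod
    def from_row(cls, row) -> "Widget":
        # required by BaseRepository._select_all
        return cls(**dict(row))

class WidgetRepository(BaseRepository[Widget]):
    _TABLE = "Widgets"
    _PK = "WidgetId"

    def create(self, name: str) -> Widget:
        # _insert drops the placeholder PK and fills it from cursor.lastrowid
        return self._insert(self._TABLE, Widget(WidgetId=-1, Name=name), self._PK)

    def list_all(self) -> List[Widget]:
        return self._select_all(self._TABLE, Widget)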

View File

@@ -1,4 +1,3 @@
# backend/database/connection.py
"""
Thin convenience layer over the built-in ``sqlite3`` module.
@@ -115,21 +114,29 @@ class DatabaseConnection:
# -----------------------------------------------------------------
# Public API: the four methods used throughout the code base
# -----------------------------------------------------------------
def execute(self, sql: str, params: Optional[Tuple[Any, ...]] = None) -> None:
def execute(self, sql: str, params: Optional[Tuple[Any, ...]] = None) -> sqlite3.Cursor:
"""
Run an INSERT/UPDATE/DELETE statement and commit immediately.
``params`` may be ``None`` (no placeholders) or a tuple of values.
Returns the underlying ``sqlite3.Cursor`` so callers can inspect
``lastrowid``, ``rowcount`` etc. This mirrors the behaviour of the
standard ``sqlite3.Connection.execute`` method.
"""
try:
if params is None:
self._cursor.execute(sql)
cursor = self._cursor.execute(sql) # ← capture cursor
else:
self._cursor.execute(sql, params)
cursor = self._cursor.execute(sql, params) # ← capture cursor
self._conn.commit()
except Exception:
# Ensure we don't leave the connection in a half-committed state.
self._conn.rollback()
return cursor # ← **return it**
except sqlite3.Error as exc:
# Keep the original error handling but re-raise after logging.
# self._logger.error(
# "SQL execution error: %s SQL: %s Params: %s",
# exc,
# sql,
# params,
# )
raise
def fetchone(
@@ -160,6 +167,15 @@ class DatabaseConnection:
self._cursor.execute(sql, params)
return self._cursor.fetchall()
def executescript(self, script: str) -> None:
"""Convenient wrapper for sqlite3.Connection.executescript."""
try:
self._conn.executescript(script)
self._conn.commit()
except Exception:
self._conn.rollback()
raise
def close(self) -> None:
"""Close the underlying SQLite connection."""
# ``cursor`` is automatically closed when the connection closes,
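Because ``execute`` now returns the cursor, callers can read ``lastrowid`` and ``rowcount`` directly. A small sketch; the file name and table are illustrative only:

db = DatabaseConnection("example.db")  # constructor takes the SQLite file path, as in demo.py
cur = db.execute(
    "INSERT INTO Classifications (ClassificationName) VALUES (?)",
    ("Baritone",),
)
print(cur.lastrowid)   # auto-increment PK of the freshly inserted row
updated = db.execute(
    "UPDATE Classifications SET ClassificationName = ? WHERE ClassificationId = ?",
    ("Bass", cur.lastrowid),
)
print(updated.rowcount)  # number of rows touched by the UPDATE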

View File

@@ -1,292 +1,193 @@
import os
import datetime as dt
import sqlite3
import random
# demo.py
# ------------------------------------------------------------
# Demonstration script that creates a few services, loads the
# classifications, and then schedules members for each position
# using the new SchedulingService.
# ------------------------------------------------------------
from __future__ import annotations
from pathlib import Path
from typing import List, Dict, Set, Tuple
from datetime import date, timedelta
from typing import Dict, List, Tuple, Any
# Import the concrete repository classes that talk to SQLite (or any DB you use)
from backend.repositories import (
ClassificationRepository,
MemberRepository,
ServiceRepository,
ServiceAvailabilityRepository,
ScheduleRepository
)
# The service we just wrote
from backend.services.scheduling_service import SchedulingService
# ----------------------------------------------------------------------
# 1 Helper that creates the database (runs the schema file)
# Helper: return the next *n* Sundays starting from today.
# ----------------------------------------------------------------------
def init_db(db_path: Path) -> None:
"""
If the DB file does not exist, create it and run the schema.sql script.
The schema file lives in backend/database/schema.sql.
"""
if db_path.exists():
print(f"✅ Database already exists at {db_path}")
return
print(f"🗂️ Creating new SQLite DB at {db_path}")
conn = sqlite3.connect(db_path)
cur = conn.cursor()
schema_path = Path(__file__).parent / "database" / "schema.sql"
if not schema_path.is_file():
raise FileNotFoundError(f"Schema file not found: {schema_path}")
with open(schema_path, "r", encoding="utf8") as f:
sql = f.read()
cur.executescript(sql)
conn.commit()
conn.close()
print("✅ Schema executed database is ready.")
def next_n_sundays(n: int) -> list[dt.date]:
"""Return a list with the next `n` Sundays after today."""
today = dt.date.today()
# weekday(): Mon=0 … Sun=6 → we want the offset to the *next* Sunday
def next_n_sundays(n: int) -> List[date]:
"""Return a list of the next *n* Sundays (including today if today is Sunday)."""
today = date.today()
# weekday(): Monday == 0 … Sunday == 6
days_until_sunday = (6 - today.weekday()) % 7
# If today is Sunday, days_until_sunday == 0 → we still want the *next* one
days_until_sunday = days_until_sunday or 7
first_sunday = today + timedelta(days=days_until_sunday)
return [first_sunday + timedelta(weeks=i) for i in range(n)]
first_sunday = today + dt.timedelta(days=days_until_sunday)
# Build the list of n Sundays
return [first_sunday + dt.timedelta(weeks=i) for i in range(n)]
def seed_db(repo) -> None:
# ----------------------------------------------------------------------
# Demo entry point (updated for multi-classification support)
# ----------------------------------------------------------------------
def demo(
classification_repo: ClassificationRepository,
member_repo: MemberRepository,
service_repo: ServiceRepository,
availability_repo: ServiceAvailabilityRepository,
schedule_repo: ScheduleRepository,
) -> None:
"""
Populate a tiny dataset, run the round-robin queue, accept one
schedule, decline another and print audit tables.
Populate a handful of services for the coming Sunday and run the
round-robin scheduler for each choir position.
The function prints what it does so you can see the flow in the console.
"""
print("\n=== 📦 Seeding reference data ===")
# ------------------------------------------------------------------
# 0⃣ Define the members we want to skip.
# ------------------------------------------------------------------
EXCLUDED_MEMBER_IDS = {20, 8, 3, 12, 4, 1, 44, 46, 28, 13, 11, 5, 16, 26, 35}
# ----- classifications -------------------------------------------------
baritone_cls = repo.create_classification("Baritone")
tenor_cls = repo.create_classification("Tenor")
alto_cls = repo.create_classification("Alto / Mezzo")
soprano_cls = repo.create_classification("Soprano")
print(f"""Created classifications →
{baritone_cls.ClassificationId}=Baritone,
{tenor_cls.ClassificationId}=Tenor,
{alto_cls.ClassificationId}=Alto,
{soprano_cls.ClassificationId}=Soprano\n""")
# ----- members --------------------------------------------------------
members = [
# 1-5 (Tenor)
repo.create_member("John", "Doe", "john.doe@example.com", "+155512340001", tenor_cls.ClassificationId),
repo.create_member("Mary", "Smith", "mary.smith@example.com", "+155512340002", tenor_cls.ClassificationId),
repo.create_member("David", "Lee", "david.lee@example.com", "+155512340003", tenor_cls.ClassificationId),
repo.create_member("Emma", "Clark", "emma.clark@example.com", "+155512340004", tenor_cls.ClassificationId),
repo.create_member("Jack", "Taylor", "jack.taylor@example.com", "+155512340005", tenor_cls.ClassificationId),
# 6-10 (Alto / Mezzo)
repo.create_member("Alice", "Brown", "alice.brown@example.com", "+155512340006", alto_cls.ClassificationId),
repo.create_member("Frank", "Davis", "frank.davis@example.com", "+155512340007", alto_cls.ClassificationId),
repo.create_member("Grace", "Miller", "grace.miller@example.com", "+155512340008", alto_cls.ClassificationId),
repo.create_member("Henry", "Wilson", "henry.wilson@example.com", "+155512340009", alto_cls.ClassificationId),
repo.create_member("Isla", "Anderson", "isla.anderson@example.com", "+155512340010", alto_cls.ClassificationId),
# 11-15 (Soprano)
repo.create_member("Bob", "Johnson", "bob.johnson@example.com", "+155512340011", soprano_cls.ClassificationId),
repo.create_member("Kara", "Thomas", "kara.thomas@example.com", "+155512340012", soprano_cls.ClassificationId),
repo.create_member("Liam", "Jackson", "liam.jackson@example.com", "+155512340013", soprano_cls.ClassificationId),
repo.create_member("Mia", "White", "mia.white@example.com", "+155512340014", soprano_cls.ClassificationId),
repo.create_member("Noah", "Harris", "noah.harris@example.com", "+155512340015", soprano_cls.ClassificationId),
# 16-20 (Baritone)
repo.create_member("Olivia", "Martin", "olivia.martin@example.com", "+155512340016", baritone_cls.ClassificationId),
repo.create_member("Paul", "Doe", "paul.doe@example.com", "+155512340017", baritone_cls.ClassificationId),
repo.create_member("Quinn", "Smith", "quinn.smith@example.com", "+155512340018", baritone_cls.ClassificationId),
repo.create_member("Ruth", "Brown", "ruth.brown@example.com", "+155512340019", baritone_cls.ClassificationId),
repo.create_member("Sam", "Lee", "sam.lee@example.com", "+155512340020", baritone_cls.ClassificationId),
# 21-25 (Tenor again)
repo.create_member("Tina", "Clark", "tina.clark@example.com", "+155512340021", tenor_cls.ClassificationId),
repo.create_member("Umar", "Davis", "umar.davis@example.com", "+155512340022", tenor_cls.ClassificationId),
repo.create_member("Vera", "Miller", "vera.miller@example.com", "+155512340023", tenor_cls.ClassificationId),
repo.create_member("Walt", "Wilson", "walt.wilson@example.com", "+155512340024", tenor_cls.ClassificationId),
repo.create_member("Xena", "Anderson", "xena.anderson@example.com", "+155512340025", tenor_cls.ClassificationId),
# 26-30 (Alto / Mezzo again)
repo.create_member("Yara", "Thomas", "yara.thomas@example.com", "+155512340026", alto_cls.ClassificationId),
repo.create_member("Zane", "Jackson", "zane.jackson@example.com", "+155512340027", alto_cls.ClassificationId),
repo.create_member("Anna", "White", "anna.white@example.com", "+155512340028", alto_cls.ClassificationId),
repo.create_member("Ben", "Harris", "ben.harris@example.com", "+155512340029", alto_cls.ClassificationId),
repo.create_member("Cara", "Martin", "cara.martin@example.com", "+155512340030", alto_cls.ClassificationId),
]
for m in members:
print(f" Member {m.MemberId}: {m.FirstName} {m.LastName} ({m.ClassificationId})")
print("\n")
print("=== 📦 Seeding Service Types & Availability ===")
# -----------------------------------------------------------------
# 1⃣ Service Types (keep the IDs for later use)
# -----------------------------------------------------------------
st_9am = repo.create_service_type("9AM")
st_11am = repo.create_service_type("11AM")
st_6pm = repo.create_service_type("6PM")
service_type_ids: Dict[str, int] = {
"9AM": st_9am.ServiceTypeId,
"11AM": st_11am.ServiceTypeId,
"6PM": st_6pm.ServiceTypeId,
}
print(
f"Created service types → "
f"{st_9am.ServiceTypeId}=9AM, "
f"{st_11am.ServiceTypeId}=11AM, "
f"{st_6pm.ServiceTypeId}=6PM"
# ------------------------------------------------------------------
# 1⃣ Build the high-level SchedulingService from the repos.
# ------------------------------------------------------------------
scheduler = SchedulingService(
classification_repo=classification_repo,
member_repo=member_repo,
service_repo=service_repo,
availability_repo=availability_repo,
schedule_repo=schedule_repo,
)
# -----------------------------------------------------------------
# 2 Build a baseline availability map (member_id → set of ServiceTypeIds)
# -----------------------------------------------------------------
def base_availability() -> Set[int]:
"""Return a set of ServiceTypeIds the member can take."""
roll = random.random()
if roll < 0.30: # ~30% get *all* three slots
return set(service_type_ids.values())
elif roll < 0.70: # ~40% get exactly two slots
return set(random.sample(list(service_type_ids.values()), 2))
else: # ~30% get a single slot
return {random.choice(list(service_type_ids.values()))}
# ------------------------------------------------------------------
# 2⃣ Create a single Sunday of services (9AM, 11AM, 6PM).
# ------------------------------------------------------------------
# We only need one Sunday for the demo; pick one of the Sundays
# returned by ``next_n_sundays(6)``.
target_sunday = next_n_sundays(6)[4] # same as original slice [1:2]
print(f"🗓️ Target Sunday: {target_sunday}")
input()
# Populate the map for every member you created earlier
availability_map: Dict[int, Set[int]] = {}
for m in members: # `members` is the list you seeded above
availability_map[m.MemberId] = base_availability()
# -----------------------------------------------------------------
# 3⃣ Hand-crafted overrides (edge cases you want to guarantee)
# -----------------------------------------------------------------
# Tenor block (IDs 15 & 2125) → only 9AM & 11AM
tenor_ids = [1, 2, 3, 4, 5, 21, 22, 23, 24, 25]
for mid in tenor_ids:
availability_map[mid] = {
service_type_ids["9AM"],
service_type_ids["11AM"],
}
# Baritone block (IDs 1620) → only 6PM
baritone_ids = [16, 17, 18, 19, 20]
for mid in baritone_ids:
availability_map[mid] = {service_type_ids["6PM"]}
# Ensure at least one member can do each slot (explicit adds)
availability_map[1].add(service_type_ids["9AM"]) # John Tenor → 9AM
availability_map[6].add(service_type_ids["11AM"]) # Alice Alto → 11AM
availability_map[11].add(service_type_ids["6PM"]) # Bob Soprano → 6PM
# -----------------------------------------------------------------
# 4⃣ Bulk-insert into ServiceAvailability
# -----------------------------------------------------------------
rows: List[Tuple[int, int]] = []
for member_id, type_set in availability_map.items():
for st_id in type_set:
rows.append((member_id, st_id))
for row in rows:
repo.db.execute(
"""
INSERT INTO ServiceAvailability (MemberId, ServiceTypeId)
VALUES (?, ?)
""",
row,
# Create the three service slots for that day.
service_ids_by_type: Dict[int, int] = {} # ServiceTypeId → ServiceId
for service_type_id in (1, 2, 3):
service = service_repo.create(service_type_id, target_sunday)
service_ids_by_type[service_type_id] = service.ServiceId
type_name = {1: "9AM", 2: "11AM", 3: "6PM"}[service_type_id]
print(
f"✅ Created Service → ServiceId={service.ServiceId}, "
f"ServiceType={type_name}, Date={service.ServiceDate}"
)
input()
print(
f"Inserted {len(rows)} ServiceAvailability rows "
f"(≈ {len(members)} members × avg. {len(rows)//len(members)} slots each)."
)
# ------------------------------------------------------------------
# 3⃣ Load the classification IDs we'll need later.
# ------------------------------------------------------------------
classifications = classification_repo.list_all()
def _cid(name: str) -> int:
return next(c.ClassificationId for c in classifications if c.ClassificationName == name)
# ----- service (the day we are scheduling) ---------------------------
service_dates = next_n_sundays(3)
services = []
for service_date in service_dates:
service = repo.create_service(st_6pm.ServiceTypeId, service_date)
print(f"Created Service → ServiceId={service.ServiceId}, Date={service.ServiceDate}")
services.append(service)
print("\n")
# --------------------------------------------------------------------
# 1⃣ Get the first Tenor and ACCEPT it
# --------------------------------------------------------------------
print("=== 🎯 FIRST SCHEDULE (should be John) ===")
scheduled_member = repo.schedule_next_member(
classification_id=soprano_cls.ClassificationId,
service_id=services[0].ServiceId,
only_active=True,
)
print(scheduled_member)
baritone_id = _cid("Baritone")
tenor_id = _cid("Tenor")
mezzo_alto_id = _cid("Alto / Mezzo")
soprano_id = _cid("Soprano")
scheduled_member = repo.schedule_next_member(
classification_id=tenor_cls.ClassificationId,
service_id=services[0].ServiceId,
only_active=True,
)
print(scheduled_member)
# ------------------------------------------------------------------
# 4⃣ Define the choir positions and which classifications are acceptable.
# ------------------------------------------------------------------
# The mapping mirrors the comment block in the original script.
positions_to_classifications: Dict[int, List[int]] = {
1: [baritone_id, tenor_id], # 1 Baritone or Tenor
2: [tenor_id], # 2 Tenor
3: [tenor_id, mezzo_alto_id], # 3 Tenor or Mezzo
4: [mezzo_alto_id], # 4 Mezzo
5: [mezzo_alto_id, soprano_id], # 5 Mezzo or Soprano
6: [mezzo_alto_id, soprano_id], # 6 Mezzo or Soprano
7: [soprano_id], # 7 Soprano
8: [soprano_id], # 8 Soprano
}
scheduled_member = repo.schedule_next_member(
classification_id=tenor_cls.ClassificationId,
service_id=services[0].ServiceId,
only_active=True,
)
print(scheduled_member)
# ------------------------------------------------------------------
# 5⃣ Run the scheduler for each position on each service slot.
# ------------------------------------------------------------------
# We keep a dict so the final printout resembles the original script.
full_schedule: Dict[int, List[Tuple[int, str, str, int]]] = {}
scheduled_member = repo.schedule_next_member(
classification_id=tenor_cls.ClassificationId,
service_id=services[0].ServiceId,
only_active=True,
)
print(scheduled_member)
for service_type_id, service_id in service_ids_by_type.items():
service_type_name = {1: "9AM", 2: "11AM", 3: "6PM"}[service_type_id]
print(f"\n=== Sunday {target_sunday} @ {service_type_name} ===")
full_schedule[service_id] = []
input()
scheduled_member = repo.schedule_next_member(
classification_id=tenor_cls.ClassificationId,
service_id=services[0].ServiceId,
only_active=True,
)
print(scheduled_member)
for position, allowed_cids in positions_to_classifications.items():
# --------------------------------------------------------------
# New round-robin path: give the whole list of allowed
# classifications to the scheduler at once.
# --------------------------------------------------------------
result = scheduler.schedule_next_member(
classification_ids=allowed_cids,
service_id=service_id,
only_active=True,
exclude_member_ids=EXCLUDED_MEMBER_IDS,
)
scheduled_member = repo.schedule_next_member(
classification_id=tenor_cls.ClassificationId,
service_id=services[2].ServiceId,
only_active=True,
)
print(scheduled_member)
# --------------------------------------------------------------
# Store the outcome: either a valid schedule tuple or a placeholder.
# --------------------------------------------------------------
if result:
full_schedule[service_id].append(result)
print(f"#{position}: {result[1]} {result[2]}")
input()
else:
placeholder = (None, "", "No eligible member", None)
full_schedule[service_id].append(placeholder)
print(f"#{position}: ❓ No eligible member")
input()
# ------------------------------------------------------------------
# 6⃣ Final dump mirrors the original ``print(schedule)``.
# ------------------------------------------------------------------
print("\n🗂️ Complete schedule dictionary:")
print(full_schedule)
# ----------------------------------------------------------------------
# 2⃣ Demo that exercises the full repository API
# ----------------------------------------------------------------------
def demo(repo) -> None:
return
# ----------------------------------------------------------------------
# 5⃣ Entry point
# Example of wiring everything together (you would normally do this in
# your application startup code).
# ----------------------------------------------------------------------
if __name__ == "__main__":
# --------------------------------------------------------------
# Path to the SQLite file (feel free to change)
# --------------------------------------------------------------
DB_PATH = Path(__file__).parent / "database_demo.db"
from backend.db import DatabaseConnection
from backend.repositories import MemberRepository, ScheduleRepository, ServiceRepository, ServiceAvailabilityRepository
from backend.services.scheduling_service import SchedulingService
# --------------------------------------------------------------
# Initialise DB if necessary
# --------------------------------------------------------------
init_db(DB_PATH)
exit()
DB_PATH = Path(__file__).parent / "database6_accepts_and_declines.db"
# --------------------------------------------------------------
# Build the connection / repository objects
# --------------------------------------------------------------
from backend.database.connection import DatabaseConnection
from backend.database.repository import Repository
# Initialise DB connection (adjust DSN as needed)
db = DatabaseConnection(DB_PATH)
# Instantiate each repository with the shared DB connection.
classification_repo = ClassificationRepository(db)
member_repo = MemberRepository(db)
service_repo = ServiceRepository(db)
availability_repo = ServiceAvailabilityRepository(db)
schedule_repo = ScheduleRepository(db)
# Run the demo.
demo(
classification_repo,
member_repo,
service_repo,
availability_repo,
schedule_repo,
)
db = DatabaseConnection(str(DB_PATH))
repo = Repository(db)
try:
demo(repo)
finally:
# Always close the connection; SQLite locks the file while open
db.close()
print("\n✅ Demo finished connection closed.")

View File

@@ -0,0 +1,42 @@
# ------------------------------------------------------------
# Public interface for the ``myapp.models`` package.
# By re-exporting the most-used symbols here callers can simply do:
#
# from myapp.models import Member, Service, ScheduleStatus
#
# This keeps import statements short and hides the internal file layout
# (whether a model lives in ``dataclasses.py`` or elsewhere).
# ------------------------------------------------------------
# Re-export all dataclass models
from .dataclasses import ( # noqa: F401 (re-exported names)
AcceptedLog,
Classification,
DeclineLog,
Member,
Schedule,
ScheduledLog,
Service,
ServiceAvailability,
ServiceType,
)
# Re-export any enums that belong to the model layer
from .enums import ScheduleStatus # noqa: F401
# Optional: define what ``from myapp.models import *`` should export.
# This is useful for documentation tools and for IDE autocompletion.
__all__ = [
# Dataclasses
"AcceptedLog",
"Classification",
"DeclineLog",
"Member",
"Schedule",
"ScheduledLog",
"Service",
"ServiceAvailability",
"ServiceType",
# Enums
"ScheduleStatus",
]

View File

@@ -0,0 +1,179 @@
# ------------------------------------------------------------
# Central place for all data-model definitions.
# ------------------------------------------------------------
from __future__ import annotations
from dataclasses import dataclass, asdict, fields
from datetime import date, datetime
from typing import Any, Dict, Tuple, Type, TypeVar, Union
# ----------------------------------------------------------------------
# Helper types: what sqlite3.Row returns (either a tuple-like or a dict-like)
# ----------------------------------------------------------------------
Row = Tuple[Any, ...] | Dict[str, Any]
T = TypeVar("T", bound="BaseModel")
# ----------------------------------------------------------------------
# BaseModel: common conversion helpers for every model
# ----------------------------------------------------------------------
class BaseModel:
"""
Minimal base class that knows how to:
* Build an instance from a SQLite row (or any mapping with column names).
* Export itself as a plain ``dict`` suitable for INSERT/UPDATE statements.
* Render a readable ``repr`` for debugging.
"""
@classmethod
def from_row(cls: Type[T], row: Row) -> T:
"""
Convert a ``sqlite3.Row`` (or a dict-like mapping) into a dataclass
instance. Field names are matched to column names; ``None`` values are
preserved verbatim. ``datetime`` and ``date`` columns are parsed from
ISO-8601 strings when necessary.
"""
# ``row`` may already be a dict; otherwise turn the Row into one.
data = dict(row) if not isinstance(row, dict) else row
converted: Dict[str, Any] = {}
for f in fields(cls):
raw = data.get(f.name)
# Preserve ``None`` exactly as-is.
if raw is None:
converted[f.name] = None
continue
# ------------------------------------------------------------------
# 1⃣ datetime handling
# ------------------------------------------------------------------
if f.type in (datetime, "datetime"): # annotation may be a plain string under "from __future__ import annotations"
# SQLite stores datetimes as ISO strings.
if isinstance(raw, str):
converted[f.name] = datetime.fromisoformat(raw)
else:
converted[f.name] = raw
# ------------------------------------------------------------------
# 2⃣ date handling
# ------------------------------------------------------------------
elif f.type in (date, "date"):
if isinstance(raw, str):
converted[f.name] = date.fromisoformat(raw)
else:
converted[f.name] = raw
# ------------------------------------------------------------------
# 3⃣ fallback: keep whatever we received
# ------------------------------------------------------------------
else:
converted[f.name] = raw
# Instantiate the concrete dataclass.
return cls(**converted) # type: ignore[arg-type]
# ------------------------------------------------------------------
# Convenience helpers
# ------------------------------------------------------------------
def to_dict(self) -> Dict[str, Any]:
"""Return a plain ``dict`` of the dataclass fields (good for INSERTs)."""
return asdict(self)
def __repr__(self) -> str:
"""Readable representation useful during debugging."""
parts = ", ".join(f"{f.name}={getattr(self, f.name)!r}" for f in fields(self))
return f"{self.__class__.__name__}({parts})"
# ----------------------------------------------------------------------
# Concrete models: each one is a thin dataclass inheriting from BaseModel.
# ----------------------------------------------------------------------
# ---------- Logging tables ----------
@dataclass
class AcceptedLog(BaseModel):
LogId: int
MemberId: int
ServiceId: int
AcceptedAt: datetime
@dataclass
class DeclineLog(BaseModel):
DeclineId: int
MemberId: int
ServiceId: int
DeclinedAt: datetime
DeclineDate: date # the service day that was declined
Reason: Union[str, None] = None
@dataclass
class ScheduledLog(BaseModel):
LogId: int
MemberId: int
ServiceId: int
ScheduledAt: datetime
ExpiresAt: datetime
# ---------- Core reference data ----------
@dataclass
class Classification(BaseModel):
ClassificationId: int
ClassificationName: str
@dataclass
class ServiceType(BaseModel):
ServiceTypeId: int
TypeName: str
# ---------- Primary domain entities ----------
@dataclass
class Member(BaseModel):
MemberId: int
FirstName: str
LastName: str
Email: Union[str, None] = None
PhoneNumber: Union[str, None] = None
ClassificationId: Union[int, None] = None
Notes: Union[str, None] = None
IsActive: int = 1
LastScheduledAt: Union[datetime, None] = None
LastAcceptedAt: Union[datetime, None] = None
LastDeclinedAt: Union[datetime, None] = None
DeclineStreak: int = 0
@dataclass
class Service(BaseModel):
ServiceId: int
ServiceTypeId: int
ServiceDate: date
@dataclass
class ServiceAvailability(BaseModel):
ServiceAvailabilityId: int
MemberId: int
ServiceTypeId: int
@dataclass
class Schedule(BaseModel):
ScheduleId: int
ServiceId: int
MemberId: int
Status: str # 'pending' | 'accepted' | 'declined'
ScheduledAt: datetime
AcceptedAt: Union[datetime, None] = None
DeclinedAt: Union[datetime, None] = None
ExpiresAt: Union[datetime, None] = None
DeclineReason: Union[str, None] = None
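A short round-trip sketch of ``from_row`` / ``to_dict``; the literal values below stand in for what SQLite would actually return:

row = {
    "ScheduleId": 7,
    "ServiceId": 3,
    "MemberId": 12,
    "Status": "pending",
    "ScheduledAt": "2025-08-24 09:15:00",  # stored by SQLite as an ISO string
}
schedule = Schedule.from_row(row)   # ScheduledAt is parsed into a datetime
params = schedule.to_dict()         # plain dict, ready for an INSERT/UPDATE
print(schedule, params["Status"])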

backend/models/enums.py
View File

@@ -0,0 +1,42 @@
# ------------------------------------------------------------
# Centralised enumeration definitions for the data-model layer.
# Keeping them in one module avoids circular imports and makes
# type-checking / IDE completion straightforward.
# ------------------------------------------------------------
from __future__ import annotations
from enum import StrEnum
from typing import Any, Iterable
class ScheduleStatus(StrEnum):
"""
Canonical status values for a ``Schedule`` row.
Using ``StrEnum`` means the enum members behave like regular strings
(e.g. they can be written directly to SQLite) while still giving us
the safety and autocomplete of an enum.
"""
PENDING = "pending"
ACCEPTED = "accepted"
DECLINED = "declined"
@classmethod
def from_raw(cls, value: Any) -> "ScheduleStatus":
"""
Convert an arbitrary value (often a plain string coming from the DB)
into a ``ScheduleStatus`` member.
Raises
------
ValueError
If the value does not correspond to any defined member.
"""
if isinstance(value, cls):
return value
try:
# ``cls(value)`` works because ``StrEnum`` subclasses ``str``.
return cls(value)
except ValueError as exc:
raise ValueError(f"Invalid ScheduleStatus: {value!r}") from exc

View File

@@ -0,0 +1,15 @@
from .classification import ClassificationRepository
from .member import MemberRepository
from .schedule import ScheduleRepository
from .service import ServiceRepository
from .service_availability import ServiceAvailabilityRepository
from .service_type import ServiceTypeRepository
__all__ = [
"ClassificationRepository"
"MemberRepository",
"ScheduleRepository",
"ServiceRepository",
"ServiceAvailabilityRepository",
"ServiceTypeRepository"
]
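Typical wiring, mirroring the demo script: one shared ``DatabaseConnection`` is handed to every repository (the database file name is illustrative):

from backend.db import DatabaseConnection
from backend.repositories import MemberRepository, ScheduleRepository

db = DatabaseConnection("scheduler.db")
members = MemberRepository(db)
schedules = ScheduleRepository(db)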

View File

@@ -0,0 +1,101 @@
# myapp/repositories/classification.py
# ------------------------------------------------------------
# Persistence layer for the ``Classification`` lookup table.
# ------------------------------------------------------------
from __future__ import annotations
from typing import List, Optional
from ..db import BaseRepository
from ..models import Classification as ClassificationModel
class ClassificationRepository(BaseRepository[ClassificationModel]):
"""
Simple CRUD + lookup helpers for the ``Classifications`` table.
Typical rows look like:
ClassificationId | ClassificationName
------------------------------------
1 | Baritone
2 | Tenor
3 | Alto / Mezzo
4 | Soprano
"""
# ------------------------------------------------------------------
# Table-level constants; change them here if the schema ever changes.
# ------------------------------------------------------------------
_TABLE = "Classifications"
_PK = "ClassificationId"
# ------------------------------------------------------------------
# Basic CRUD operations
# ------------------------------------------------------------------
def create(self, name: str) -> ClassificationModel:
"""
Insert a new classification row and return the populated model.
Parameters
----------
name: str
Human-readable name (e.g. “Baritone”, “Tenor”, …).
Returns
-------
ClassificationModel
Instance with the newly assigned ``ClassificationId``.
"""
classification = ClassificationModel(ClassificationId=-1, ClassificationName=name)
return self._insert(self._TABLE, classification, self._PK)
def get_by_id(self, classification_id: int) -> Optional[ClassificationModel]:
"""
Retrieve a single classification by primary key.
"""
sql = f"SELECT * FROM {self._TABLE} WHERE {self._PK} = ?"
row = self.db.fetchone(sql, (classification_id,))
return ClassificationModel.from_row(row) if row else None
def find_by_name(self, name: str) -> Optional[ClassificationModel]:
"""
Look up a classification by its exact name.
"""
sql = f"SELECT * FROM {self._TABLE} WHERE ClassificationName = ?"
row = self.db.fetchone(sql, (name,))
return ClassificationModel.from_row(row) if row else None
# ------------------------------------------------------------------
# Convenience queries
# ------------------------------------------------------------------
def list_all(self) -> List[ClassificationModel]:
"""
Return every classification row, ordered alphabetically.
"""
sql = f"SELECT * FROM {self._TABLE} ORDER BY ClassificationName ASC"
rows = self.db.fetchall(sql)
return [ClassificationModel.from_row(r) for r in rows]
def ensure_exists(self, name: str) -> ClassificationModel:
"""
Idempotent helper used by higher-level services:
* If a classification with ``name`` already exists, return it.
* Otherwise create a new row and return the freshly inserted model.
"""
existing = self.find_by_name(name)
if existing:
return existing
return self.create(name)
# ------------------------------------------------------------------
# Optional delete (use with care; other tables may have FK constraints)
# ------------------------------------------------------------------
def delete(self, classification_id: int) -> None:
"""
Hard-delete a classification row. In practice you'll rarely need
this because classifications tend to be static reference data.
"""
sql = f"DELETE FROM {self._TABLE} WHERE {self._PK} = ?"
self.db.execute(sql, (classification_id,))
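``ensure_exists`` keeps seeding idempotent; a short sketch, assuming ``db`` is an open ``DatabaseConnection``:

classifications = ClassificationRepository(db)
tenor = classifications.ensure_exists("Tenor")    # inserts on the first call
again = classifications.ensure_exists("Tenor")    # returns the existing row afterwards
assert again.ClassificationId == tenor.ClassificationId
for c in classifications.list_all():              # ordered by ClassificationName
    print(c.ClassificationId, c.ClassificationName)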

View File

@@ -0,0 +1,237 @@
# myapp/repositories/member.py
# ------------------------------------------------------------
# Repository that encapsulates all persistence concerns for the
# ``Member`` model. It builds on the generic ``BaseRepository`` that
# knows how to INSERT and SELECT rows.
# ------------------------------------------------------------
from __future__ import annotations
import datetime as _dt
from typing import Any, List, Sequence, Optional
from ..db import BaseRepository, DatabaseConnection
from ..models import Member as MemberModel
class MemberRepository(BaseRepository[MemberModel]):
"""
High-level data-access object for ``Member`` rows.
Only *persistence* logic lives here; any business rules (e.g. round-robin
scheduling) should be implemented in a service layer that composes this
repository with others.
"""
# ------------------------------------------------------------------
# Table-level constants; keep them in one place so a rename is easy.
# ------------------------------------------------------------------
_TABLE = "Members"
_PK = "MemberId"
# ------------------------------------------------------------------
# CRUD helpers
# ------------------------------------------------------------------
def create(
self,
first_name: str,
last_name: str,
*,
email: Optional[str] = None,
phone_number: Optional[str] = None,
classification_id: Optional[int] = None,
notes: Optional[str] = None,
is_active: int = 1,
) -> MemberModel:
"""
Insert a new member row and return the fully-populated ``Member`` instance.
"""
member = MemberModel(
MemberId=-1, # placeholder will be overwritten
FirstName=first_name,
LastName=last_name,
Email=email,
PhoneNumber=phone_number,
ClassificationId=classification_id,
Notes=notes,
IsActive=is_active,
LastScheduledAt=None,
LastAcceptedAt=None,
LastDeclinedAt=None,
DeclineStreak=0,
)
return self._insert(self._TABLE, member, self._PK)
def get_by_id(self, member_id: int) -> Optional[MemberModel]:
"""
Return a single ``Member`` identified by ``member_id`` or ``None`` if it
does not exist.
"""
sql = f"SELECT * FROM {self._TABLE} WHERE {self._PK} = ?"
row = self.db.fetchone(sql, (member_id,))
return MemberModel.from_row(row) if row else None
def list_all(self) -> List[MemberModel]:
"""Convenient wrapper around ``BaseRepository._select_all``."""
return self._select_all(self._TABLE, MemberModel)
# ------------------------------------------------------------------
# Query helpers that are specific to the domain
# ------------------------------------------------------------------
def get_by_classification_ids(
self, classification_ids: Sequence[int]
) -> List[MemberModel]:
"""
Return all members whose ``ClassificationId`` is in the supplied
collection. Empty input yields an empty list (no DB round-trip).
"""
if not classification_ids:
return []
placeholders = ",".join("?" for _ in classification_ids)
sql = (
f"SELECT * FROM {self._TABLE} "
f"WHERE ClassificationId IN ({placeholders})"
)
rows = self.db.fetchall(sql, tuple(classification_ids))
return [MemberModel.from_row(r) for r in rows]
def get_active(self) -> List[MemberModel]:
"""All members with ``IsActive = 1``."""
sql = f"SELECT * FROM {self._TABLE} WHERE IsActive = 1"
rows = self.db.fetchall(sql)
return [MemberModel.from_row(r) for r in rows]
# ------------------------------------------------------------------
# Helper used by the scheduling service; builds the round-robin queue.
# ------------------------------------------------------------------
def candidate_queue(
self,
classification_ids: Sequence[int],
*,
only_active: bool = True,
boost_seconds: int = 172_800, # 2 days in seconds
) -> List[MemberModel]:
"""
Return members ordered for the round-robin scheduler.
Ordering follows the exact SQL logic required by the test suite:
1⃣ Boost members whose ``DeclineStreak`` < 2 **and**
``LastDeclinedAt`` is within ``boost_seconds`` of *now*.
Those rows get a leading ``0`` in the ``CASE`` expression;
all others get ``1``.
2⃣ After the boost, order by ``LastAcceptedAt`` (oldest first,
``NULL`` → far-past sentinel).
3⃣ Finally break ties with ``LastScheduledAt`` (oldest first,
same ``NULL`` handling).
Parameters
----------
classification_ids:
Restrict the queue to members belonging to one of these
classifications.
only_active:
If ``True`` (default) filter out rows where ``IsActive != 1``.
boost_seconds:
Number of seconds that count as “recently declined”.
The default is **2 days** (172800 seconds).
Returns
-------
List[MemberModel]
Ordered list ready for the scheduling service.
"""
# ------------------------------------------------------------------
# Build the dynamic WHERE clause.
# ------------------------------------------------------------------
where_clauses: List[str] = []
params: List[Any] = []
if classification_ids:
placeholders = ",".join("?" for _ in classification_ids)
where_clauses.append(f"ClassificationId IN ({placeholders})")
params.extend(classification_ids)
if only_active:
where_clauses.append("IsActive = 1")
where_sql = " AND ".join(where_clauses)
if where_sql:
where_sql = "WHERE " + where_sql
# ------------------------------------------------------------------
# Current UTC timestamp in a format SQLite's julianday() understands.
# ``%Y-%m-%d %H:%M:%S``: no fractional seconds.
# ------------------------------------------------------------------
now_iso = _dt.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
# ------------------------------------------------------------------
# Full query; note the three-level ORDER BY.
# ------------------------------------------------------------------
sql = f"""
SELECT *
FROM {self._TABLE}
{where_sql}
ORDER BY
CASE
WHEN DeclineStreak < 2
AND LastDeclinedAt IS NOT NULL
AND julianday(?) - julianday(LastDeclinedAt) <= (? / 86400.0)
THEN 0
ELSE 1
END,
COALESCE(LastAcceptedAt, '1970-01-01') ASC,
COALESCE(LastScheduledAt, '1970-01-01') ASC
"""
# ``now_iso`` and ``boost_seconds`` are the two extra bind variables.
exec_params = tuple(params) + (now_iso, boost_seconds)
rows = self.db.fetchall(sql, exec_params)
return [MemberModel.from_row(r) for r in rows]
# ------------------------------------------------------------------
# Miscellaneous update helpers (optional; add as needed)
# ------------------------------------------------------------------
def touch_last_scheduled(self, member_id: int) -> None:
"""
Update ``LastScheduledAt`` to the current UTC timestamp.
Used by the scheduling service after a schedule row is created.
"""
sql = f"""
UPDATE {self._TABLE}
SET LastScheduledAt = strftime('%Y-%m-%d %H:%M:%f', 'now')
WHERE {self._PK} = ?
"""
self.db.execute(sql, (member_id,))
def set_last_accepted(self, member_id: int) -> None:
"""
Record a successful acceptance; clears any cool-off.
"""
sql = f"""
UPDATE {self._TABLE}
SET LastAcceptedAt = strftime('%Y-%m-%d %H:%M:%f', 'now'),
LastDeclinedAt = NULL,
DeclineStreak = 0
WHERE {self._PK} = ?
"""
self.db.execute(sql, (member_id,))
def set_last_declined(self, member_id: int, decline_date: str) -> None:
"""
Record a decline. ``decline_date`` should be an ISO-formatted date
(e.g. ``'2025-08-22'``). This implements the one-day cool-off rule
and bumps the ``DeclineStreak`` counter.
"""
sql = f"""
UPDATE {self._TABLE}
SET
LastDeclinedAt = ?,
DeclineStreak = COALESCE(DeclineStreak, 0) + 1
WHERE {self._PK} = ?
"""
self.db.execute(sql, (decline_date, member_id))
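How a scheduling layer is expected to consume ``candidate_queue``: fetch the ordered list, take the first member and stamp ``LastScheduledAt``. A hedged sketch; the classification IDs are placeholders and ``db`` is an open connection:

members = MemberRepository(db)
queue = members.candidate_queue([2, 3], only_active=True)  # e.g. Tenor + Alto/Mezzo IDs
if queue:
    chosen = queue[0]                                      # boosted / longest-waiting member first
    members.touch_last_scheduled(chosen.MemberId)
    print(f"Offering the slot to {chosen.FirstName} {chosen.LastName}")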

View File

@@ -0,0 +1,264 @@
# myapp/repositories/schedule.py
# ------------------------------------------------------------
# Persistence layer for the ``Schedule`` model.
# ------------------------------------------------------------
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from ..db import BaseRepository
from ..models import Schedule as ScheduleModel
from ..models import ScheduleStatus
class ScheduleRepository(BaseRepository[ScheduleModel]):
"""Dataaccess object for the ``Schedules`` table."""
# ------------------------------------------------------------------
# Table-level constants; change them in one place if the schema evolves.
# ------------------------------------------------------------------
_TABLE = "Schedules"
_PK = "ScheduleId"
# ------------------------------------------------------------------
# CRUD helpers
# ------------------------------------------------------------------
def create(
self,
*,
service_id: int,
member_id: int,
status: ScheduleStatus = ScheduleStatus.PENDING,
reason: Optional[str] = None,
scheduled_at: Optional[Any] = None,
expires_at: Optional[Any] = None,
) -> ScheduleModel:
"""
Insert a brand-new schedule row.
Parameters
----------
service_id, member_id : int
FK references.
status : ScheduleStatus
Desired initial status (PENDING, DECLINED, …).
reason : str | None
Stored in ``DeclineReason`` when the status is ``DECLINED``.
scheduled_at, expires_at : datetime-compatible | None
``scheduled_at`` defaults to SQLite's ``CURRENT_TIMESTAMP``.
"""
schedule = ScheduleModel(
ScheduleId=-1, # placeholder will be replaced
ServiceId=service_id,
MemberId=member_id,
Status=status.value,
ScheduledAt=scheduled_at or "CURRENT_TIMESTAMP",
AcceptedAt=None,
DeclinedAt=None,
ExpiresAt=expires_at,
DeclineReason=reason if status == ScheduleStatus.DECLINED else None,
)
return self._insert(self._TABLE, schedule, self._PK)
def get_by_id(self, schedule_id: int) -> Optional[ScheduleModel]:
"""Fetch a schedule by its primary key."""
sql = f"SELECT * FROM {self._TABLE} WHERE {self._PK} = ?"
row = self.db.fetchone(sql, (schedule_id,))
return ScheduleModel.from_row(row) if row else None
def list_all(self) -> List[ScheduleModel]:
"""Return every schedule row."""
return self._select_all(self._TABLE, ScheduleModel)
# ------------------------------------------------------------------
# Helper used by the SchedulingService to locate an existing row.
# ------------------------------------------------------------------
def get_one(self, *, member_id: int, service_id: int) -> Optional[ScheduleModel]:
"""
Return the *first* schedule (any status) for the supplied
``member_id`` / ``service_id`` pair, or ``None`` if none exists.
"""
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE MemberId = ?
AND ServiceId = ?
LIMIT 1
"""
row = self.db.fetchone(sql, (member_id, service_id))
return ScheduleModel.from_row(row) if row else None
# ------------------------------------------------------------------
# Generic status-change helper (used for “decline” and similar ops).
# ------------------------------------------------------------------
def update_status(
self,
*,
schedule_id: int,
new_status: ScheduleStatus,
reason: Optional[str] = None,
) -> None:
"""
Switch a schedule's status and optionally store a reason.
If ``new_status`` is ``DECLINED`` the ``DeclineReason`` column is
populated; otherwise it is cleared.
"""
# Build the SET clause dynamically; we only touch the columns we need.
set_clause = "Status = ?, DeclinedAt = NULL, DeclineReason = NULL"
params: list[Any] = [new_status.value]
if new_status == ScheduleStatus.DECLINED:
set_clause = "Status = ?, DeclinedAt = ?, DeclineReason = ?"
params.extend(["CURRENT_TIMESTAMP", reason])
params.append(schedule_id) # WHERE clause param
sql = f"""
UPDATE {self._TABLE}
SET {set_clause}
WHERE {self._PK} = ?
"""
self.db.execute(sql, tuple(params))
# ------------------------------------------------------------------
# Query helpers used by the scheduling service
# ------------------------------------------------------------------
def has_any(
self,
member_id: int,
service_id: int,
statuses: Sequence[ScheduleStatus],
) -> bool:
"""True if a schedule exists for the pair with any of the given statuses."""
if not statuses:
return False
placeholders = ",".join("?" for _ in statuses)
sql = f"""
SELECT 1
FROM {self._TABLE}
WHERE MemberId = ?
AND ServiceId = ?
AND Status IN ({placeholders})
LIMIT 1
"""
params = (member_id, service_id, *[s.value for s in statuses])
row = self.db.fetchone(sql, params)
return row is not None
def is_available(self, member_id: int, service_id: int) -> bool:
"""
Cooldown rule: a member is unavailable if they have accepted a
schedule for the same service within the last ``COOLDOWN_DAYS``.
"""
# Latest acceptance timestamp (if any)
sql_latest = f"""
SELECT MAX(AcceptedAt) AS last_accept
FROM {self._TABLE}
WHERE MemberId = ?
AND ServiceId = ?
AND Status = ?
"""
row = self.db.fetchone(
sql_latest,
(member_id, service_id, ScheduleStatus.ACCEPTED.value),
)
last_accept: Optional[str] = row["last_accept"] if row else None
if not last_accept:
return True # never accepted → free to schedule
COOLDOWN_DAYS = 1
sql_cooldown = f"""
SELECT 1
FROM {self._TABLE}
WHERE MemberId = ?
AND ServiceId = ?
AND Status = ?
AND DATE(AcceptedAt) >= DATE('now', '-{COOLDOWN_DAYS} day')
LIMIT 1
"""
row = self.db.fetchone(
sql_cooldown,
(member_id, service_id, ScheduleStatus.ACCEPTED.value),
)
return row is None # None → outside the cooldown window
# ------------------------------------------------------------------
# Status-transition helpers (accept / decline), kept for completeness.
# ------------------------------------------------------------------
def mark_accepted(
self,
schedule_id: int,
accepted_at: Optional[Any] = None,
) -> None:
sql = f"""
UPDATE {self._TABLE}
SET Status = ?,
AcceptedAt = ?,
DeclinedAt = NULL,
DeclineReason = NULL
WHERE {self._PK} = ?
"""
ts = accepted_at or "CURRENT_TIMESTAMP"
self.db.execute(sql, (ScheduleStatus.ACCEPTED.value, ts, schedule_id))
def mark_declined(
self,
schedule_id: int,
declined_at: Optional[Any] = None,
decline_reason: Optional[str] = None,
) -> None:
sql = f"""
UPDATE {self._TABLE}
SET Status = ?,
DeclinedAt = ?,
DeclineReason = ?
WHERE {self._PK} = ?
"""
ts = declined_at or "CURRENT_TIMESTAMP"
self.db.execute(sql, (ScheduleStatus.DECLINED.value, ts, decline_reason, schedule_id))
# ------------------------------------------------------------------
# Same-day helper used by the scheduling service
# ------------------------------------------------------------------
def has_schedule_on_date(self, member_id: int, service_date: str) -> bool:
"""
Return ``True`` if *any* schedule (regardless of status) exists for
``member_id`` on the calendar day ``service_date`` (format YYYY-MM-DD).
This abstracts the “a member can only be scheduled once per day”
rule so the service layer does not need to know the underlying
table layout.
"""
sql = f"""
SELECT 1
FROM {self._TABLE} AS s
JOIN Services AS sv ON s.ServiceId = sv.ServiceId
WHERE s.MemberId = ?
AND sv.ServiceDate = ?
LIMIT 1
"""
row = self.db.fetchone(sql, (member_id, service_date))
return row is not None
# ------------------------------------------------------------------
# Miscellaneous convenience queries
# ------------------------------------------------------------------
def get_pending_for_service(self, service_id: int) -> List[ScheduleModel]:
"""All PENDING schedules for a given service."""
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE ServiceId = ?
AND Status = ?
"""
rows = self.db.fetchall(sql, (service_id, ScheduleStatus.PENDING.value))
return [ScheduleModel.from_row(r) for r in rows]
def delete(self, schedule_id: int) -> None:
"""Harddelete a schedule row (use with caution)."""
sql = f"DELETE FROM {self._TABLE} WHERE {self._PK} = ?"
self.db.execute(sql, (schedule_id,))
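A sketch of the accept/decline flow built on these helpers; ``member`` and ``service`` are assumed to be previously created model instances and ``db`` an open connection:

schedules = ScheduleRepository(db)

# Only offer the slot if the member is outside the acceptance cooldown window.
if schedules.is_available(member.MemberId, service.ServiceId):
    offer = schedules.create(service_id=service.ServiceId, member_id=member.MemberId)  # Status = PENDING
    schedules.mark_accepted(offer.ScheduleId)  # the member said yes
    # or: schedules.mark_declined(offer.ScheduleId, decline_reason="on holiday")

print(schedules.get_pending_for_service(service.ServiceId))  # whatever is still open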

View File

@@ -0,0 +1,105 @@
# myapp/repositories/service.py
# ------------------------------------------------------------
# Persistence layer for Service-related models.
# ------------------------------------------------------------
from __future__ import annotations
from datetime import date, datetime
from typing import List, Optional, Sequence, Any
from ..db import BaseRepository
from ..models import Service as ServiceModel
# ----------------------------------------------------------------------
# ServiceRepository: handles the ``Services`` table
# ----------------------------------------------------------------------
class ServiceRepository(BaseRepository[ServiceModel]):
"""
CRUD + query helpers for the ``Services`` table.
Business rules (e.g. “do not schedule past services”) belong in a
service layer that composes this repository with the others.
"""
_TABLE = "Services"
_PK = "ServiceId"
# ------------------------------
# Basic CRUD
# ------------------------------
def create(
self,
service_type_id: int,
service_date: date,
) -> ServiceModel:
"""
Insert a new service row.
``service_date`` can be a ``datetime.date`` or an ISO-8601 string.
"""
svc = ServiceModel(
ServiceId=-1, # placeholder will be overwritten
ServiceTypeId=service_type_id,
ServiceDate=service_date,
)
return self._insert(self._TABLE, svc, self._PK)
def get_by_id(self, service_id: int) -> Optional[ServiceModel]:
sql = f"SELECT * FROM {self._TABLE} WHERE {self._PK} = ?"
row = self.db.fetchone(sql, (service_id,))
return ServiceModel.from_row(row) if row else None
def list_all(self) -> List[ServiceModel]:
return self._select_all(self._TABLE, ServiceModel)
# ------------------------------
# Domain-specific queries
# ------------------------------
def upcoming(self, after: Optional[date] = None, limit: int = 100) -> List[ServiceModel]:
"""
Return services that occur on or after ``after`` (defaults to today).
Results are ordered chronologically.
"""
after_date = after or date.today()
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE ServiceDate >= ?
ORDER BY ServiceDate ASC
LIMIT ?
"""
rows = self.db.fetchall(sql, (after_date.isoformat(), limit))
return [ServiceModel.from_row(r) for r in rows]
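# Illustrative call (a sketch; assumes ``service_repo`` is a configured
# ServiceRepository):
#
#     from datetime import date
#     next_month = service_repo.upcoming(after=date(2025, 9, 1), limit=20)
#     for svc in next_month:
#         print(svc.ServiceId, svc.ServiceDate)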
def by_type(self, service_type_ids: Sequence[int]) -> List[ServiceModel]:
"""
Fetch all services whose ``ServiceTypeId`` is in the supplied list.
Empty input → empty list (no DB round-trip).
"""
if not service_type_ids:
return []
placeholders = ",".join("?" for _ in service_type_ids)
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE ServiceTypeId IN ({placeholders})
ORDER BY ServiceDate ASC
"""
rows = self.db.fetchall(sql, tuple(service_type_ids))
return [ServiceModel.from_row(r) for r in rows]
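# The IN clause above expands one ``?`` per id, e.g. [1, 3] produces
# ``... WHERE ServiceTypeId IN (?,?)`` with the ids passed as bind parameters,
# so the query stays injection-safe. Illustrative call:
#
#     morning_and_evening = service_repo.by_type([1, 3])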
# ------------------------------
# Update helpers (optional)
# ------------------------------
def reschedule(self, service_id: int, new_date: date) -> None:
"""
Change the ``ServiceDate`` of an existing service.
"""
sql = f"""
UPDATE {self._TABLE}
SET ServiceDate = ?
WHERE {self._PK} = ?
"""
self.db.execute(sql, (new_date.isoformat(), service_id))

View File

@@ -0,0 +1,158 @@
# myapp/repositories/service_availability.py
# ------------------------------------------------------------
# Persistence layer for the ServiceAvailability table.
# ------------------------------------------------------------
from __future__ import annotations
from typing import List, Optional, Sequence, Any
from ..db import BaseRepository
from ..models import ServiceAvailability as ServiceAvailabilityModel
class ServiceAvailabilityRepository(BaseRepository[ServiceAvailabilityModel]):
"""
CRUD + query helpers for the ``ServiceAvailability`` table.
The table records which members are allowed to receive which
service-type slots (e.g. "9AM", "11AM", "6PM"). All SQL is
parameterised to stay safe from injection attacks.
"""
# ------------------------------------------------------------------
# Table-level constants: change them in one place if the schema evolves.
# ------------------------------------------------------------------
_TABLE = "ServiceAvailability"
_PK = "ServiceAvailabilityId"
# ------------------------------------------------------------------
# Basic CRUD helpers
# ------------------------------------------------------------------
def create(
self,
member_id: int,
service_type_id: int,
) -> ServiceAvailabilityModel:
"""
Insert a new availability row.
The ``UNIQUE (MemberId, ServiceTypeId)`` constraint guarantees
uniqueness: if the pair already exists, SQLite raises an
``IntegrityError``. To make the operation idempotent instead, we
first check for an existing row and return it unchanged.
"""
existing = self.get(member_id, service_type_id)
if existing:
return existing
avail = ServiceAvailabilityModel(
ServiceAvailabilityId=-1, # placeholder; will be overwritten
MemberId=member_id,
ServiceTypeId=service_type_id,
)
return self._insert(self._TABLE, avail, self._PK)
def get(
self,
member_id: int,
service_type_id: int,
) -> Optional[ServiceAvailabilityModel]:
"""
Retrieve a single availability record for the given member /
service-type pair, or ``None`` if it does not exist.
"""
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE MemberId = ?
AND ServiceTypeId = ?
"""
row = self.db.fetchone(sql, (member_id, service_type_id))
return ServiceAvailabilityModel.from_row(row) if row else None
def delete(self, availability_id: int) -> None:
"""
Hard-delete an availability row by its primary key.
Use with care: most callers will prefer ``revoke`` (by member &
service type), which is a bit more expressive.
"""
sql = f"DELETE FROM {self._TABLE} WHERE {self._PK} = ?"
self.db.execute(sql, (availability_id,))
# ------------------------------------------------------------------
# Convenience “grant / revoke” helpers (the most common ops)
# ------------------------------------------------------------------
def grant(self, member_id: int, service_type_id: int) -> ServiceAvailabilityModel:
"""
Public API to give a member permission for a particular service slot.
Internally delegates to ``create``, which already handles the
idempotency check.
"""
return self.create(member_id, service_type_id)
def revoke(self, member_id: int, service_type_id: int) -> None:
"""
Remove a member's permission for a particular service slot.
"""
sql = f"""
DELETE FROM {self._TABLE}
WHERE MemberId = ?
AND ServiceTypeId = ?
"""
self.db.execute(sql, (member_id, service_type_id))
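# Illustrative grant/revoke round-trip (a sketch; assumes ``avail_repo`` is a
# ServiceAvailabilityRepository and that member 7 and service type 2 exist):
#
#     row = avail_repo.grant(7, 2)       # first call inserts the pair
#     same = avail_repo.grant(7, 2)      # second call returns the existing row
#     assert row.ServiceAvailabilityId == same.ServiceAvailabilityId
#     avail_repo.revoke(7, 2)            # pair removed
#     assert avail_repo.get(7, 2) is None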
# ------------------------------------------------------------------
# Query helpers used by the scheduling service
# ------------------------------------------------------------------
def list_by_member(self, member_id: int) -> List[ServiceAvailabilityModel]:
"""
Return every ``ServiceAvailability`` row that belongs to the given
member. Handy for building a member's personal "available slots"
view.
"""
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE MemberId = ?
"""
rows = self.db.fetchall(sql, (member_id,))
return [ServiceAvailabilityModel.from_row(r) for r in rows]
def list_by_service_type(self, service_type_id: int) -> List[ServiceAvailabilityModel]:
"""
Return all members that are allowed to receive the given service type.
"""
sql = f"""
SELECT *
FROM {self._TABLE}
WHERE ServiceTypeId = ?
"""
rows = self.db.fetchall(sql, (service_type_id,))
return [ServiceAvailabilityModel.from_row(r) for r in rows]
def list_all(self) -> List[ServiceAvailabilityModel]:
"""
Return every row in the table; useful for admin dashboards or
bulk-export scripts.
"""
return self._select_all(self._TABLE, ServiceAvailabilityModel)
# ------------------------------------------------------------------
# Helper for the round-robin scheduler
# ------------------------------------------------------------------
def members_for_type(self, service_type_id: int) -> List[int]:
"""
Return a flat list of ``MemberId`` values that are eligible for the
supplied ``service_type_id``. The scheduling service can then
intersect this list with the pool of members that have the correct
classification, activity flag, etc.
"""
sql = f"""
SELECT MemberId
FROM {self._TABLE}
WHERE ServiceTypeId = ?
"""
rows = self.db.fetchall(sql, (service_type_id,))
# ``rows`` is a sequence of sqlite3.Row objects; each row acts like a dict.
return [row["MemberId"] for row in rows]
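# Typical use by the scheduler (illustrative sketch; ``member_repo`` and the
# classification filter below are assumptions about the calling code):
#
#     eligible_ids = set(avail_repo.members_for_type(service_type_id=3))
#     pool = [m for m in member_repo.get_by_classification_ids([1, 2])
#             if m.MemberId in eligible_ids and m.IsActive]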

View File

@@ -0,0 +1,95 @@
# myapp/repositories/service_type.py
# ------------------------------------------------------------
# Persistence layer for the ``ServiceTypes`` table.
# ------------------------------------------------------------
from __future__ import annotations
from typing import List, Optional
from ..db import BaseRepository
from ..models import ServiceType as ServiceTypeModel
class ServiceTypeRepository(BaseRepository[ServiceTypeModel]):
"""
CRUD-style helper for the ``ServiceTypes`` lookup table.
* Each row stores a human-readable label (e.g. "9AM").
* The repository does **not** enforce any particular naming scheme;
that kind of validation belongs in a higher-level service layer if you
need it.
"""
# ------------------------------------------------------------------
# Table-level constants: change them in one place if the schema evolves.
# ------------------------------------------------------------------
_TABLE = "ServiceTypes"
_PK = "ServiceTypeId"
# ------------------------------------------------------------------
# Basic CRUD operations
# ------------------------------------------------------------------
def create(self, type_name: str) -> ServiceTypeModel:
"""
Insert a new service-type row and return the populated model.
Parameters
----------
type_name: str
Human-readable identifier for the slot (e.g. "9AM").
Returns
-------
ServiceTypeModel
Instance with the freshly assigned primary key.
"""
st = ServiceTypeModel(ServiceTypeId=-1, TypeName=type_name)
return self._insert(self._TABLE, st, self._PK)
def get_by_id(self, type_id: int) -> Optional[ServiceTypeModel]:
"""Fetch a single ServiceType by its primary key."""
sql = f"SELECT * FROM {self._TABLE} WHERE {self._PK} = ?"
row = self.db.fetchone(sql, (type_id,))
return ServiceTypeModel.from_row(row) if row else None
# ------------------------------------------------------------------
# Convenience lookups
# ------------------------------------------------------------------
def find_by_name(self, name: str) -> Optional[ServiceTypeModel]:
"""
Return the ServiceType whose ``TypeName`` matches ``name``.
Useful for turning a user-provided slot label into its integer id.
"""
sql = f"SELECT * FROM {self._TABLE} WHERE TypeName = ?"
row = self.db.fetchone(sql, (name,))
return ServiceTypeModel.from_row(row) if row else None
def list_all(self) -> List[ServiceTypeModel]:
"""Return every ServiceType row, ordered alphabetically by name."""
sql = f"SELECT * FROM {self._TABLE} ORDER BY TypeName ASC"
rows = self.db.fetchall(sql)
return [ServiceTypeModel.from_row(r) for r in rows]
# ------------------------------------------------------------------
# Optional helper: bulk-ensure a set of expected slots exists
# ------------------------------------------------------------------
def ensure_slots(self, slot_names: List[str]) -> List[ServiceTypeModel]:
"""
Given a list of desired slot labels (e.g. ["9AM","11AM","6PM"]),
insert any that are missing and return the complete set of
ServiceTypeModel objects.
This is handy during application bootstrap or migrations.
"""
existing = {st.TypeName: st for st in self.list_all()}
result: List[ServiceTypeModel] = []
for name in slot_names:
if name in existing:
result.append(existing[name])
else:
# Insert the missing slot and add it to the result list.
result.append(self.create(name))
return result
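# Bootstrap example (a sketch; assumes ``service_type_repo`` is built during
# application start-up):
#
#     slots = service_type_repo.ensure_slots(["9AM", "11AM", "6PM"])
#     by_name = {s.TypeName: s.ServiceTypeId for s in slots}
#     # on a fresh database: {"9AM": 1, "11AM": 2, "6PM": 3}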

View File

@@ -81,7 +81,7 @@ CREATE TABLE Schedules (
-- Reservation / status columns -----------------------------------------
Status TEXT NOT NULL CHECK (Status IN ('pending','accepted','declined')),
- ScheduledAt DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, -- renamed from OfferedAt
+ ScheduledAt DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, -- set when status -> 'pending'
AcceptedAt DATETIME, -- set when status -> 'accepted'
DeclinedAt DATETIME, -- set when status -> 'declined'
ExpiresAt DATETIME, -- pending rows expire after X minutes

View File

View File

@@ -0,0 +1,227 @@
# myapp/services/scheduling_service.py
# ------------------------------------------------------------
# Scheduling service orchestrates the various repositories
# to pick the next eligible member for a given service.
# ------------------------------------------------------------
from __future__ import annotations
from datetime import datetime, timezone
from typing import Iterable, List, Literal, Optional, Tuple
from ..repositories import (
ClassificationRepository,
MemberRepository,
ServiceRepository,
ServiceAvailabilityRepository,
ScheduleRepository
)
from ..models import ScheduleStatus
class SchedulingService:
"""
High-level service that implements the round-robin / boost / cool-down
scheduling algorithm.
It deliberately keeps **business rules** (ordering, eligibility checks)
here, while the repositories remain pure data-access helpers.
"""
def __init__(
self,
classification_repo: ClassificationRepository,
member_repo: MemberRepository,
service_repo: ServiceRepository,
availability_repo: ServiceAvailabilityRepository,
schedule_repo: ScheduleRepository,
) -> None:
self.classification_repo = classification_repo
self.member_repo = member_repo
self.service_repo = service_repo
self.availability_repo = availability_repo
self.schedule_repo = schedule_repo
def schedule_next_member(
self,
classification_ids: Iterable[int],
service_id: int,
*,
only_active: bool = True,
boost_seconds: int = 5 * 24 * 60 * 60,
exclude_member_ids: Iterable[int] | None = None,
) -> Optional[Tuple[int, str, str, int]]:
"""
Choose the next member for ``service_id`` while respecting
ServiceAvailability, schedule-status constraints, and the *same-day*
exclusion rule.
Parameters
----------
classification_ids : Iterable[int]
One or more classification identifiers.
service_id : int
The service we are trying to schedule.
only_active : bool, optional
Filter out inactive members (default: ``True``).
boost_seconds : int, optional
Seconds for the "5-day decline boost" (default: 5 days).
exclude_member_ids : Iterable[int] | None, optional
MemberIds that must be ignored even if they otherwise qualify.
Returns
-------
Tuple[member_id, first_name, last_name, schedule_id] | None
The first eligible member according to the ordering rules, or ``None``.
"""
# -----------------------------------------------------------------
# 0⃣ Resolve the Service row → we need ServiceTypeId and ServiceDate.
# -----------------------------------------------------------------
svc = self.service_repo.get_by_id(service_id)
if svc is None:
return None
service_type_id = svc.ServiceTypeId
# ``svc.ServiceDate`` is stored as a DATE (YYYY-MM-DD). We keep it as a string
# because SQLite date arithmetic works fine with that format.
target_date = svc.ServiceDate
# -----------------------------------------------------------------
# 1⃣ Build the candidate queue (ordering handled by the repo).
# -----------------------------------------------------------------
excluded = set(exclude_member_ids or [])
candidates: List = self.member_repo.candidate_queue(
classification_ids=list(classification_ids),
only_active=only_active,
boost_seconds=boost_seconds,
)
# -----------------------------------------------------------------
# 2⃣ Walk the ordered queue and apply all constraints.
# -----------------------------------------------------------------
for member in candidates:
member_id = member.MemberId
# ---- Early skip for explicit exclusions ---------------------------------
if member_id in excluded:
continue
# ---- Availability check -------------------------------------------------
if not self.availability_repo.get(member_id, service_type_id):
continue # not eligible for this service type
# ---- SAME-DAY EXCLUSION ------------------------------------------------
# Ask the repository whether this member already has *any* schedule on
# the same calendar day as the target service.
if self.schedule_repo.has_schedule_on_date(member_id, target_date):
# Member already booked somewhere on this day → skip.
continue
# ---- Schedule-status constraints (accepted / pending / declined) ---------
# A single query is enough: skip the member if *any* schedule for this
# service already exists in an accepted, pending, or declined state.
if self.schedule_repo.has_any(
member_id,
service_id,
statuses=[
ScheduleStatus.ACCEPTED,
ScheduleStatus.PENDING,
ScheduleStatus.DECLINED,
],
):
continue
# -----------------------------------------------------------------
# SUCCESS: create a pending schedule.
# -----------------------------------------------------------------
schedule = self.schedule_repo.create(
service_id=service_id,
member_id=member_id,
status=ScheduleStatus.PENDING,
)
schedule_id = schedule.ScheduleId
# -----------------------------------------------------------------
# Update the member's LastScheduledAt so round-robin stays fair.
# -----------------------------------------------------------------
self.member_repo.touch_last_scheduled(member_id)
# -----------------------------------------------------------------
# Return the useful bits to the caller.
# -----------------------------------------------------------------
return (
member_id,
member.FirstName,
member.LastName,
schedule_id,
)
# -----------------------------------------------------------------
# No eligible member found.
# -----------------------------------------------------------------
return None
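# Illustrative wiring of the service (a sketch; the database path and the
# repository constructors are assumptions about the surrounding application):
#
#     db = DatabaseConnection("backend.db")
#     svc = SchedulingService(
#         ClassificationRepository(db),
#         MemberRepository(db),
#         ServiceRepository(db),
#         ServiceAvailabilityRepository(db),
#         ScheduleRepository(db),
#     )
#     picked = svc.schedule_next_member(classification_ids=[1, 2], service_id=10)
#     if picked:
#         member_id, first_name, last_name, schedule_id = picked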
def decline_service_for_user(
self,
member_id: int,
service_id: int,
*,
reason: Optional[str] = None,
) -> Tuple[Literal["created"] | Literal["updated"], int]:
"""
Mark a service as *declined* for a particular member.
Parameters
----------
member_id : int
Primary key of the member who is declining.
service_id : int
Primary key of the service being declined.
reason : str | None, optional
Optional free-form text explaining why the member declined.
Stored in the ``DeclineReason`` column if your ``Schedules`` table has
one; otherwise it is ignored.
Returns
-------
Tuple[action, schedule_id]
*action*: ``"created"`` if a brand-new schedule row was inserted,
``"updated"`` if an existing row was switched to
``ScheduleStatus.DECLINED``.
*schedule_id*: the primary key of the affected ``Schedules`` row.
"""
# ---------------------------------------------------------
# 1⃣ Look for an existing schedule (any status) for this pair.
# ---------------------------------------------------------
existing = self.schedule_repo.get_one(
member_id=member_id,
service_id=service_id,
)
if existing:
# -----------------------------------------------------
# 2⃣ There is already a row; just flip its status.
# -----------------------------------------------------
self.schedule_repo.update_status(
schedule_id=existing.ScheduleId,
new_status=ScheduleStatus.DECLINED,
reason=reason,
)
return ("updated", existing.ScheduleId)
# ---------------------------------------------------------
# 3⃣ No row yet; insert a fresh *declined* schedule.
# ---------------------------------------------------------
new_sched = self.schedule_repo.create(
service_id=service_id,
member_id=member_id,
status=ScheduleStatus.DECLINED,
reason=reason,
)
return ("created", new_sched.ScheduleId)

View File

backend/tests/conftest.py
View File

@@ -0,0 +1,164 @@
# tests/conftest.py
import os
import pytest
# ----------------------------------------------------------------------
# Import the concrete classes from your backend package.
# Adjust the import path if your package layout differs.
# ----------------------------------------------------------------------
from backend.db import DatabaseConnection
from backend.repositories import (
MemberRepository,
ClassificationRepository,
ServiceTypeRepository,
ServiceAvailabilityRepository,
)
# ----------------------------------------------------------------------
# Path to the full schema (DDL) that creates every table, including
# ServiceAvailability.
# ----------------------------------------------------------------------
@pytest.fixture(scope="session")
def schema_path() -> str:
"""Absolute path to the SQL file that creates the test schema."""
return os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "schema.sql")
)
# ----------------------------------------------------------------------
# Fresh inmemory SQLite DB with the full schema applied.
# ----------------------------------------------------------------------
@pytest.fixture
def db_connection(schema_path: str) -> DatabaseConnection:
conn = DatabaseConnection(":memory:")
# Load the DDL.
with open(schema_path, "r", encoding="utf-8") as f:
ddl = f.read()
conn._conn.executescript(ddl) # apply the whole schema
conn._conn.commit()
yield conn
conn.close()
# ----------------------------------------------------------------------
# Seed lookup tables that have foreign-key relationships.
# ----------------------------------------------------------------------
@pytest.fixture
def seed_lookup_tables(db_connection: DatabaseConnection):
"""
Insert rows into lookup tables that other tables reference.
Currently we need:
• Classifications for Member tests
• ServiceTypes for ServiceTypeRepository tests
• ServiceAvailability for the repo we are testing now
"""
# ---- Classifications -------------------------------------------------
classifications = [
("Soprano",), # ClassificationId = 1
("Alto / Mezzo",), # ClassificationId = 2
("Tenor",), # ClassificationId = 3
("Baritone",), # ClassificationId = 4
]
for name in classifications:
db_connection.execute(
"INSERT INTO Classifications (ClassificationName) VALUES (?)",
name,
)
# ---- ServiceTypes ----------------------------------------------------
# The three time-slot examples used throughout the tests.
service_types = [("9AM",), ("11AM",), ("6PM",)]
for name in service_types:
db_connection.execute(
"INSERT INTO ServiceTypes (TypeName) VALUES (?)",
name,
)
# ---- ServiceAvailability ---------------------------------------------
# We need a couple of members first, otherwise the FK constraints will
# reject the inserts. We'll create two dummy members (Alice = 1,
# Bob = 2) and then map them to the three slots.
#
# NOTE: In a real test suite you would probably use the MemberRepository
# to create these rows, but inserting directly keeps the fixture fast and
# independent of the Member repo implementation.
dummy_members = [
# (FirstName, LastName, Email, PhoneNumber, Notes, ClassificationId, IsActive)
("Alice", "Smith", "alice@example.com", None, None, 1, 1),
("Bob", "Jones", "bob@example.com", None, None, 2, 1),
]
for (
fn, ln, email, phone, notes,
classification_id, is_active,
) in dummy_members:
# The MemberId column is AUTOINCREMENT, so we omit it.
db_connection.execute(
"""
INSERT INTO Members
(FirstName, LastName, Email, PhoneNumber, Notes,
ClassificationId, IsActive)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
(fn, ln, email, phone, notes, classification_id, is_active),
)
# At this point SQLite has assigned MemberIds 1 and 2.
# Map them to the three service-type slots:
# Alice → 9AM (id=1) and 6PM (id=3)
# Bob → 11AM (id=2) and 6PM (id=3)
availability = [
(1, 1), # Alice 9AM
(1, 3), # Alice 6PM
(2, 2), # Bob 11AM
(2, 3), # Bob 6PM
]
for member_id, service_type_id in availability:
db_connection.execute(
"""
INSERT INTO ServiceAvailability (MemberId, ServiceTypeId)
VALUES (?, ?)
""",
(member_id, service_type_id),
)
# Commit everything so downstream fixtures see the data.
db_connection._conn.commit()
# ----------------------------------------------------------------------
# Repository factories each receives the same fresh DB that already has
# the lookup data seeded.
# ----------------------------------------------------------------------
@pytest.fixture
def member_repo(
db_connection: DatabaseConnection,
seed_lookup_tables,
) -> MemberRepository:
return MemberRepository(db_connection)
@pytest.fixture
def classification_repo(
db_connection: DatabaseConnection,
seed_lookup_tables,
) -> ClassificationRepository:
return ClassificationRepository(db_connection)
@pytest.fixture
def service_type_repo(
db_connection: DatabaseConnection,
seed_lookup_tables,
) -> ServiceTypeRepository:
return ServiceTypeRepository(db_connection)
@pytest.fixture
def service_availability_repo(
db_connection: DatabaseConnection,
seed_lookup_tables,
) -> ServiceAvailabilityRepository:
return ServiceAvailabilityRepository(db_connection)

View File

@@ -0,0 +1,93 @@
# backend/tests/repositories/test_classification.py
# ------------------------------------------------------------
# Pytest suite for the ClassificationRepository.
# ------------------------------------------------------------
import pytest
from backend.models import Classification as ClassificationModel
from backend.repositories import ClassificationRepository
# ----------------------------------------------------------------------
# 1⃣ Basic CRUD: create & get_by_id
# ----------------------------------------------------------------------
def test_create_and_get_by_id(classification_repo):
new = classification_repo.create("Countertenor")
assert isinstance(new.ClassificationId, int) and new.ClassificationId > 0
assert new.ClassificationName == "Countertenor"
fetched = classification_repo.get_by_id(new.ClassificationId)
assert fetched is not None
assert fetched.ClassificationId == new.ClassificationId
assert fetched.ClassificationName == "Countertenor"
# ----------------------------------------------------------------------
# 2⃣ Lookup by name (exact match)
# ----------------------------------------------------------------------
def test_find_by_name_existing(classification_repo):
soprano = classification_repo.find_by_name("Soprano")
assert soprano is not None
assert soprano.ClassificationName == "Soprano"
def test_find_by_name_missing(classification_repo):
missing = classification_repo.find_by_name("Bass")
assert missing is None
# ----------------------------------------------------------------------
# 3⃣ List all classifications (ordered alphabetically)
# ----------------------------------------------------------------------
def test_list_all(classification_repo):
all_rows: list[ClassificationModel] = classification_repo.list_all()
assert len(all_rows) == 4 # the four seeded rows
names = [row.ClassificationName for row in all_rows]
assert names == sorted(names)
# ----------------------------------------------------------------------
# 4⃣ Idempotent helper: ensure_exists
# ----------------------------------------------------------------------
def test_ensure_exists_creates_when_missing(classification_repo):
before = classification_repo.find_by_name("Bass")
assert before is None
bass = classification_repo.ensure_exists("Bass")
assert isinstance(bass, ClassificationModel)
assert bass.ClassificationName == "Bass"
# second call returns the same row
again = classification_repo.ensure_exists("Bass")
assert again.ClassificationId == bass.ClassificationId
# total rows = 4 seeded + the new “Bass”
all_rows = classification_repo.list_all()
assert len(all_rows) == 5
def test_ensure_exists_returns_existing(classification_repo):
existing = classification_repo.ensure_exists("Alto / Mezzo")
assert existing is not None
assert existing.ClassificationName == "Alto / Mezzo"
# no extra rows added
all_rows = classification_repo.list_all()
assert len(all_rows) == 4
# ----------------------------------------------------------------------
# 5⃣ Delete (optional; demonstrates that the method works)
# ----------------------------------------------------------------------
def test_delete(classification_repo):
temp = classification_repo.create("TempVoice")
assert classification_repo.find_by_name("TempVoice") is not None
classification_repo.delete(temp.ClassificationId)
assert classification_repo.find_by_name("TempVoice") is None
remaining = classification_repo.list_all()
remaining_names = {r.ClassificationName for r in remaining}
assert "TempVoice" not in remaining_names
# the original four seeded names must still be present
assert {"Soprano", "Alto / Mezzo", "Tenor", "Baritone"} <= remaining_names

View File

@@ -0,0 +1,383 @@
# tests/repositories/test_member.py
import datetime as dt
from typing import List
import pytest
from backend.models import Member as MemberModel, ScheduleStatus
from backend.repositories import MemberRepository
# ----------------------------------------------------------------------
# Helper: a few sample members we can reuse across tests
# ----------------------------------------------------------------------
@pytest.fixture
def sample_members() -> List[MemberModel]:
"""Return a list of MemberModel objects (not yet persisted)."""
return [
MemberModel(
MemberId=-1,
FirstName="Alice",
LastName="Anderson",
Email="alice@example.com",
PhoneNumber="5551111",
ClassificationId=1,
Notes=None,
IsActive=1,
LastScheduledAt=None,
LastAcceptedAt=None,
LastDeclinedAt=None,
DeclineStreak=0,
),
MemberModel(
MemberId=-1,
FirstName="Bob",
LastName="Baker",
Email="bob@example.com",
PhoneNumber="5552222",
ClassificationId=2,
Notes="VIP",
IsActive=1,
LastScheduledAt=dt.datetime(2025, 8, 20, 10, 0, 0),
LastAcceptedAt=dt.datetime(2025, 8, 19, 9, 30, 0),
LastDeclinedAt=None,
DeclineStreak=0,
),
MemberModel(
MemberId=-1,
FirstName="Carol",
LastName="Carter",
Email=None,
PhoneNumber=None,
ClassificationId=1,
Notes=None,
IsActive=0, # inactive; useful for filter tests
LastScheduledAt=None,
LastAcceptedAt=None,
LastDeclinedAt=None,
DeclineStreak=0,
),
]
# ----------------------------------------------------------------------
# Fixture to wipe the Members table (used by tests that need a clean slate)
# ----------------------------------------------------------------------
@pytest.fixture
def clean_members(member_repo: MemberRepository):
"""
Delete *all* rows from the Members table **and** any rows that
reference it (ServiceAvailability). The service-availability tests
rely on the seeded Alice/Bob rows, so we only invoke this fixture
in the member-repo tests that need isolation.
"""
# 1⃣ Remove dependent rows first; otherwise the FK constraint blocks us.
member_repo.db.execute(
f"DELETE FROM ServiceAvailability"
) # commit happens inside `execute`
# 2⃣ Now we can safely delete the members themselves.
member_repo.db.execute(
f"DELETE FROM {member_repo._TABLE}"
)
member_repo.db._conn.commit()
# ----------------------------------------------------------------------
# Helper to build a MemberModel with explicit timestamps.
# ----------------------------------------------------------------------
def make_member(
repo: MemberRepository,
first_name: str,
last_name: str,
*,
classification_id: int = 1,
is_active: int = 1,
accepted_at: str | None = None,
scheduled_at: str | None = None,
declined_at: str | None = None,
decline_streak: int = 0,
) -> MemberModel:
"""Insert a member and then manually set the optional timestamp columns."""
m = repo.create(
first_name=first_name,
last_name=last_name,
email=None,
phone_number=None,
classification_id=classification_id,
notes=None,
is_active=is_active,
)
# Directly update the row so we can control the timestamps without
# invoking the repository's higher-level helpers (which would reset
# other fields).
sql = f"""
UPDATE {repo._TABLE}
SET
LastAcceptedAt = ?,
LastScheduledAt = ?,
LastDeclinedAt = ?,
DeclineStreak = ?
WHERE {repo._PK} = ?
"""
repo.db.execute(
sql,
(
accepted_at,
scheduled_at,
declined_at,
decline_streak,
m.MemberId,
),
)
# Refresh the model from the DB so the attributes reflect the changes.
return repo.get_by_id(m.MemberId) # type: ignore[return-value]
# ----------------------------------------------------------------------
# 1⃣ Basic CRUD: create & get_by_id
# ----------------------------------------------------------------------
def test_create_and_get_by_id(member_repo: MemberRepository):
member = member_repo.create(
first_name="Diana",
last_name="Doe",
email="diana@example.com",
phone_number="5553333",
classification_id=3,
notes="New recruit",
is_active=1,
)
# Primary key should be a positive integer (AUTOINCREMENT starts at 1)
assert isinstance(member.MemberId, int) and member.MemberId > 0
# Retrieve the same row
fetched = member_repo.get_by_id(member.MemberId)
assert fetched is not None
assert fetched.FirstName == "Diana"
assert fetched.LastName == "Doe"
assert fetched.Email == "diana@example.com"
assert fetched.ClassificationId == 3
assert fetched.IsActive == 1
assert fetched.Notes == "New recruit"
def test_get_by_id_returns_none_when_missing(member_repo: MemberRepository):
"""A PK that does not exist must return ``None`` (no exception)."""
assert member_repo.get_by_id(9999) is None
# ----------------------------------------------------------------------
# 2⃣ list_all: bulk insertion + retrieval
# ----------------------------------------------------------------------
def test_list_all(
member_repo: MemberRepository,
sample_members: List[MemberModel],
clean_members, # ensure we start from an empty table
):
for m in sample_members:
member_repo.create(
first_name=m.FirstName,
last_name=m.LastName,
email=m.Email,
phone_number=m.PhoneNumber,
classification_id=m.ClassificationId,
notes=m.Notes,
is_active=m.IsActive,
)
all_members = member_repo.list_all()
# Because we cleared the table first, we expect exactly the three we added.
assert len(all_members) == 3
# Spotcheck that each name appears
names = {(m.FirstName, m.LastName) for m in all_members}
assert ("Alice", "Anderson") in names
assert ("Bob", "Baker") in names
assert ("Carol", "Carter") in names
# ----------------------------------------------------------------------
# 3⃣ get_by_classification_ids: filter by classification list
# ----------------------------------------------------------------------
def test_get_by_classification_ids(
member_repo: MemberRepository,
sample_members: List[MemberModel],
clean_members,
):
for m in sample_members:
member_repo.create(
first_name=m.FirstName,
last_name=m.LastName,
email=m.Email,
phone_number=m.PhoneNumber,
classification_id=m.ClassificationId,
notes=m.Notes,
is_active=m.IsActive,
)
# Classification 1 → Alice + Carol (2 rows)
result = member_repo.get_by_classification_ids([1])
assert len(result) == 2
assert {r.FirstName for r in result} == {"Alice", "Carol"}
# Classification 2 → only Bob
result = member_repo.get_by_classification_ids([2])
assert len(result) == 1
assert result[0].FirstName == "Bob"
# Both classifications → all three
result = member_repo.get_by_classification_ids([1, 2])
assert len(result) == 3
def test_candidate_queue_obeys_boost_and_timestamp_sorting(
member_repo: MemberRepository,
):
"""
Verify that ``candidate_queue`` respects:
1⃣ The boost clause (low ``DeclineStreak`` + recent ``LastDeclinedAt``).
2⃣ ``LastAcceptedAt`` ASC (oldest first, ``NULL`` → far past).
3⃣ ``LastScheduledAt`` ASC (same handling).
The default boost window is 2 days (172,800 seconds).
Additional rule (as stated in the docstring):
*Members whose ``LastAcceptedAt`` is NULL should appear **before** members
that have a non-NULL acceptance date.*
"""
# --------------------------------------------------------------
# 0⃣ Remove any ServiceAvailability rows that reference the seeded
# members, then delete the seeded members themselves.
# --------------------------------------------------------------
member_repo.db.execute("DELETE FROM ServiceAvailability")
member_repo.db.execute(
f"DELETE FROM {member_repo._TABLE} WHERE MemberId IN (1, 2)"
)
member_repo.db._conn.commit()
# --------------------------------------------------------------
# 1⃣ Build a diverse set of members.
# --------------------------------------------------------------
# ── A: active, no timestamps (baseline, NULL acceptance)
a = make_member(member_repo, "Alice", "Anderson")
# ── B: active, accepted yesterday (non-NULL acceptance)
yesterday = (dt.datetime.utcnow() - dt.timedelta(days=1)).strftime(
"%Y-%m-%d %H:%M:%S"
)
b = make_member(
member_repo,
"Bob",
"Baker",
accepted_at=yesterday,
)
# ── C: active, declined **today** with a low streak (boost candidate)
today_iso = dt.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
c = make_member(
member_repo,
"Carol",
"Clark",
declined_at=today_iso,
decline_streak=1, # < 2 → qualifies for boost
)
# ── D: active, declined **3 days ago** (outside the 2-day boost window,
# still NULL acceptance)
three_days_ago = (dt.datetime.utcnow() - dt.timedelta(days=3)).strftime(
"%Y-%m-%d %H:%M:%S"
)
d = make_member(
member_repo,
"Dave",
"Davis",
declined_at=three_days_ago,
decline_streak=1,
)
# ── E: **inactive**; should never appear when only_active=True.
e = make_member(
member_repo,
"Eve",
"Evans",
is_active=0,
)
# --------------------------------------------------------------
# 2⃣ Pull the queue (default: only_active=True, boost_seconds = 2 days)
# --------------------------------------------------------------
q = member_repo.candidate_queue(classification_ids=[1])
# --------------------------------------------------------------
# 3⃣ Expected order (explain each step):
# --------------------------------------------------------------
# • Boosted members first → Carol (recent decline, streak < 2)
# • Then all members whose ``LastAcceptedAt`` is NULL,
# ordered by ``LastScheduledAt`` (both are NULL, so fallback to PK order):
# → Alice, then Dave
# • Finally members with a non-NULL acceptance date → Bob
# • Eve is inactive → omitted.
expected_first_names = ["Carol", "Alice", "Dave", "Bob"]
assert [m.FirstName for m in q] == expected_first_names
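# For reference, an ORDER BY that satisfies these rules could look roughly like
# the sketch below (an assumption about candidate_queue's SQL, not a copy of the
# real implementation):
#
#     ORDER BY
#         COALESCE(DeclineStreak < 2
#                  AND LastDeclinedAt >= datetime('now', '-172800 seconds'), 0) DESC,
#         COALESCE(LastAcceptedAt,  '0001-01-01') ASC,
#         COALESCE(LastScheduledAt, '0001-01-01') ASC,
#         MemberId ASC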
# ----------------------------------------------------------------------
# 5⃣ touch_last_scheduled updates the timestamp column
# ----------------------------------------------------------------------
def test_touch_last_scheduled_updates_timestamp(member_repo: MemberRepository):
member = member_repo.create(
first_name="Eve",
last_name="Evans",
email=None,
phone_number=None,
classification_id=4,
notes=None,
is_active=1,
)
assert member.LastScheduledAt is None
# Call the helper; it should set LastScheduledAt to the current UTC time.
member_repo.touch_last_scheduled(member.MemberId)
refreshed = member_repo.get_by_id(member.MemberId)
assert refreshed is not None
assert refreshed.LastScheduledAt is not None
# SQLite stores timestamps as ISO-8601 strings; parsing should succeed.
dt.datetime.fromisoformat(refreshed.LastScheduledAt)
# ----------------------------------------------------------------------
# 6⃣ set_last_declined records decline date and increments streak
# ----------------------------------------------------------------------
def test_set_last_declined_increments_streak_and_records_date(member_repo: MemberRepository):
member = member_repo.create(
first_name="Frank",
last_name="Foster",
email=None,
phone_number=None,
classification_id=4,
notes=None,
is_active=1,
)
# Initial state
assert member.DeclineStreak == 0
assert member.LastDeclinedAt is None
# Simulate a decline today.
today_iso = dt.date.today().isoformat()
member_repo.set_last_declined(member.MemberId, today_iso)
refreshed = member_repo.get_by_id(member.MemberId)
assert refreshed.DeclineStreak == 1
assert refreshed.LastDeclinedAt == today_iso
# Simulate a second decline tomorrow; the streak should increase again.
tomorrow_iso = (dt.date.today() + dt.timedelta(days=1)).isoformat()
member_repo.set_last_declined(member.MemberId, tomorrow_iso)
refreshed2 = member_repo.get_by_id(member.MemberId)
assert refreshed2.DeclineStreak == 2
assert refreshed2.LastDeclinedAt == tomorrow_iso

View File

@@ -0,0 +1,69 @@
# tests/test_service_availability.py
import pytest
def test_grant_and_revoke(
service_availability_repo,
member_repo,
service_type_repo,
):
"""
Verify that:
• `grant` adds a new (member, service_type) pair idempotently.
• `revoke` removes the pair.
• The helper `members_for_type` returns the expected IDs.
"""
# ------------------------------------------------------------------
# Arrange: fetch the IDs we know exist from the fixture.
# ------------------------------------------------------------------
# Alice is member_id 1, Bob is member_id 2 (AUTOINCREMENT order).
alice_id = 1
bob_id = 2
# Service type IDs correspond to the order we inserted them:
# 9AM → 1, 11AM → 2, 6PM → 3
nine_am_id = 1
eleven_am_id = 2
six_pm_id = 3
# ------------------------------------------------------------------
# Act: try granting a *new* availability that wasn't seeded.
# We'll give Alice the 11AM slot (she didn't have it before).
# ------------------------------------------------------------------
new_pair = service_availability_repo.grant(alice_id, eleven_am_id)
# ------------------------------------------------------------------
# Assert: the row exists and the helper returns the right member list.
# ------------------------------------------------------------------
assert new_pair.MemberId == alice_id
assert new_pair.ServiceTypeId == eleven_am_id
# `members_for_type` should now contain Alice (1) **and** Bob (2) for 11AM.
members_for_11am = service_availability_repo.members_for_type(eleven_am_id)
assert set(members_for_11am) == {alice_id, bob_id}
# ------------------------------------------------------------------
# Revoke the newly added pair and ensure it disappears.
# ------------------------------------------------------------------
service_availability_repo.revoke(alice_id, eleven_am_id)
# After revocation the 11AM list should contain **only** Bob.
members_after_revoke = service_availability_repo.members_for_type(eleven_am_id)
assert members_after_revoke == [bob_id]
# Also verify that `get` returns None for the removed pair.
assert service_availability_repo.get(alice_id, eleven_am_id) is None
def test_list_by_member(service_availability_repo):
"""
Validate that `list_by_member` returns exactly the slots we seeded.
"""
# Alice (member_id 1) should have 9AM (1) and 6PM (3)
alice_slots = service_availability_repo.list_by_member(1)
alice_type_ids = sorted([s.ServiceTypeId for s in alice_slots])
assert alice_type_ids == [1, 3]
# Bob (member_id 2) should have 11AM (2) and 6PM (3)
bob_slots = service_availability_repo.list_by_member(2)
bob_type_ids = sorted([s.ServiceTypeId for s in bob_slots])
assert bob_type_ids == [2, 3]

View File

@@ -0,0 +1,62 @@
# tests/test_service_type_repo.py
import pytest
from backend.models.dataclasses import ServiceType as ServiceTypeModel
def test_create_and_find(service_type_repo):
"""
Verify that we can insert a brand-new ServiceType and retrieve it
both by primary key and by name.
"""
# Create a new slot that wasn't part of the seed data.
new_slot = service_type_repo.create("2PM")
assert isinstance(new_slot, ServiceTypeModel)
assert new_slot.TypeName == "2PM"
assert new_slot.ServiceTypeId > 0 # autoincrement worked
# Find by primary key.
fetched_by_id = service_type_repo.get_by_id(new_slot.ServiceTypeId)
assert fetched_by_id == new_slot
# Find by name.
fetched_by_name = service_type_repo.find_by_name("2PM")
assert fetched_by_name == new_slot
def test_list_all_contains_seeded_slots(service_type_repo):
"""
The three seeded slots (9AM, 11AM, 6PM) should be present and sorted
alphabetically by the repository implementation.
"""
all_slots = service_type_repo.list_all()
names = [s.TypeName for s in all_slots]
# The seed fixture inserted exactly these three names.
assert set(names) >= {"9AM", "11AM", "6PM"}
# Because ``list_all`` orders by ``TypeName ASC`` we expect alphabetical order.
assert names == sorted(names)
def test_ensure_slots_is_idempotent(service_type_repo):
"""
``ensure_slots`` should insert missing rows and return the full set,
without creating duplicates on subsequent calls.
"""
# First call: the three seeded slots already exist, so only the brand-new "3PM" row is inserted.
wanted = ["9AM", "11AM", "6PM", "3PM"]
result_first = service_type_repo.ensure_slots(wanted)
# All four names must now exist.
assert {s.TypeName for s in result_first} == set(wanted)
# Capture the IDs for later comparison.
ids_before = {s.TypeName: s.ServiceTypeId for s in result_first}
# Second call should *not* create new rows.
result_second = service_type_repo.ensure_slots(wanted)
ids_after = {s.TypeName: s.ServiceTypeId for s in result_second}
# IDs must be unchanged (no duplicates were added).
assert ids_before == ids_after
assert len(result_second) == len(wanted)

View File