Add MLOps feature store, fix UI layout, add docs and Gitea remote

Backend:
- Replace on-the-fly Ollama calls with versioned feature store (task_features, task_edges)
- Background Tokio worker drains pending rows; write path returns immediately
- MLConfig versioning: changing model IDs triggers automatic backfill via next_stale()
- AppState with FromRef; new GET /api/ml/status observability endpoint
- Idempotent mark_pending (content hash guards), retry failed rows after 30s
- Remove tracked build artifacts (backend/target/, frontend/.next/, node_modules/)

Frontend:
- TaskItem: items-center alignment (fixes checkbox/text offset), break-words for overflow
- TaskDetailPanel: fix invisible AI context (text-gray-700→text-gray-400), show all fields
- TaskDetailPanel: pending placeholder when latent_desc not yet computed, show task ID
- GraphView: surface pending_count as amber pulsing "analyzing N tasks…" hint in legend
- Fix Task.created_at type (number/Unix seconds, not string)
- Auth gate: LoginPage + sessionStorage; fix e2e tests to bypass gate in jsdom
- Fix deleteTask test assertion and '1 remaining'→'1 left' stale text

Docs:
- VitePress docs in docs/ with guide, MLOps pipeline, and API reference

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Alvis
2026-04-10 06:16:28 +00:00
parent 95342f852f
commit 9b77d6ea67
23998 changed files with 2593 additions and 3230377 deletions

View File

@@ -5,23 +5,25 @@ use axum::{
use axum_test::TestServer;
use serde_json::{json, Value};
use sqlx::SqlitePool;
use std::sync::Arc;
use taskpile_backend::{db, ml::MLConfig, state::AppState};
use tokio::sync::Notify;
/// Build a TestServer against an in-memory SQLite. We deliberately do NOT
/// spawn the ML worker — tests must not depend on Ollama being reachable.
/// Feature rows will sit in `pending` forever, which is exactly what we want
/// to assert the pure-read graph endpoint behaves correctly in that state.
async fn setup_server() -> TestServer {
let pool = SqlitePool::connect("sqlite::memory:").await.unwrap();
sqlx::query(
r#"
CREATE TABLE IF NOT EXISTS tasks (
id TEXT PRIMARY KEY NOT NULL,
title TEXT NOT NULL,
description TEXT,
completed BOOLEAN NOT NULL DEFAULT 0,
created_at INTEGER NOT NULL
)
"#,
)
.execute(&pool)
.await
.unwrap();
sqlx::query("PRAGMA foreign_keys = ON")
.execute(&pool)
.await
.unwrap();
db::run_migrations(&pool).await.unwrap();
let cfg = Arc::new(MLConfig::default());
let notify = Arc::new(Notify::new());
let state = AppState::new(pool, cfg, notify);
let app = Router::new()
.route("/api/tasks", get(taskpile_backend::routes::tasks::list_tasks))
@@ -29,7 +31,8 @@ async fn setup_server() -> TestServer {
.route("/api/tasks/:id", patch(taskpile_backend::routes::tasks::update_task))
.route("/api/tasks/:id", delete(taskpile_backend::routes::tasks::delete_task))
.route("/api/graph", get(taskpile_backend::routes::graph::get_graph))
.with_state(pool);
.route("/api/ml/status", get(taskpile_backend::routes::ml::get_ml_status))
.with_state(state);
TestServer::new(app).unwrap()
}
@@ -57,6 +60,24 @@ async fn test_create_task() {
assert!(body["id"].is_string());
}
#[tokio::test]
async fn test_create_task_seeds_feature_row() {
    // Creating a task should immediately insert a `pending` feature row so
    // the worker picks it up — the write path shouldn't block on inference.
    let server = setup_server().await;
    let resp = server
        .post("/api/tasks")
        .json(&json!({"title": "Write docs"}))
        .await;
    // Pin the write path first (same check as test_create_task): if the
    // create itself failed, the pending-count assertions below would fail
    // with a misleading message about the feature store.
    assert!(resp.json::<Value>()["id"].is_string());

    let resp = server.get("/api/ml/status").await;
    resp.assert_status_ok();
    let status: Value = resp.json();
    // Exactly one row seeded, and it must start in `pending` — it can never
    // be `ready` or `failed` here because no worker is running in tests.
    assert_eq!(status["pending"], 1);
    assert_eq!(status["ready"], 0);
    assert_eq!(status["failed"], 0);
}
#[tokio::test]
async fn test_crud_flow() {
let server = setup_server().await;
@@ -86,12 +107,14 @@ async fn test_crud_flow() {
assert_eq!(updated["completed"], true);
assert_eq!(updated["title"], "Write tests");
// Graph
// Graph — nodes present, edges empty (worker not running in tests).
let resp = server.get("/api/graph").await;
resp.assert_status_ok();
let graph: Value = resp.json();
assert_eq!(graph["nodes"].as_array().unwrap().len(), 1);
assert!(graph["edges"].is_array());
assert_eq!(graph["edges"].as_array().unwrap().len(), 0);
assert_eq!(graph["pending_count"], 1);
// Delete
let resp = server.delete(&format!("/api/tasks/{id}")).await;
@@ -104,6 +127,25 @@ async fn test_crud_flow() {
assert_eq!(tasks.as_array().unwrap().len(), 0);
}
#[tokio::test]
async fn test_delete_cascades_to_features() {
    // Deleting a task should cascade to task_features via FK ON DELETE.
    let server = setup_server().await;
    let resp = server
        .post("/api/tasks")
        .json(&json!({"title": "Temp"}))
        .await;
    let id = resp.json::<Value>()["id"].as_str().unwrap().to_string();

    // Guard against a vacuous pass: if the create never seeded a feature
    // row, `pending` would be 0 both before and after the delete and the
    // final assertions would succeed without exercising the cascade at all.
    let status: Value = server.get("/api/ml/status").await.json();
    assert_eq!(status["pending"], 1);

    server.delete(&format!("/api/tasks/{id}")).await;

    // After the delete, the FK cascade must have removed the feature row.
    let resp = server.get("/api/ml/status").await;
    resp.assert_status_ok();
    let status: Value = resp.json();
    assert_eq!(status["pending"], 0);
    assert_eq!(status["ready"], 0);
    assert_eq!(status["failed"], 0);
}
#[tokio::test]
async fn test_delete_nonexistent_returns_404() {
let server = setup_server().await;
@@ -119,6 +161,7 @@ async fn test_graph_endpoint_structure() {
let graph: Value = resp.json();
assert!(graph["nodes"].is_array());
assert!(graph["edges"].is_array());
assert_eq!(graph["pending_count"], 0);
}
#[tokio::test]
@@ -134,9 +177,11 @@ async fn test_create_task_with_description() {
}
#[tokio::test]
async fn test_graph_with_multiple_tasks_has_edges() {
async fn test_graph_nodes_present_without_worker() {
// Without a worker running, features never become ready, so the graph
// contains all nodes but no edges. This is the intended degraded mode:
// users still see their tasks even if Ollama is down.
let server = setup_server().await;
// Create enough tasks to statistically guarantee edges (~30% of pairs)
for i in 0..10 {
server
.post("/api/tasks")
@@ -147,7 +192,18 @@ async fn test_graph_with_multiple_tasks_has_edges() {
resp.assert_status_ok();
let graph: Value = resp.json();
assert_eq!(graph["nodes"].as_array().unwrap().len(), 10);
// 45 possible pairs at 30% => highly likely to have at least 1
let edge_count = graph["edges"].as_array().unwrap().len();
assert!(edge_count <= 45);
assert_eq!(graph["edges"].as_array().unwrap().len(), 0);
assert_eq!(graph["pending_count"], 10);
}
#[tokio::test]
async fn test_ml_status_reports_config() {
    // /api/ml/status must surface the active MLConfig fields so operators
    // can see which models and prompt version the feature store is using.
    let server = setup_server().await;
    let response = server.get("/api/ml/status").await;
    response.assert_status_ok();
    let body: Value = response.json();
    // Model identifiers and prompt version are reported as strings.
    for key in ["desc_model", "embed_model", "prompt_version"] {
        assert!(body[key].is_string());
    }
    // The similarity threshold is numeric.
    assert!(body["min_similarity"].is_number());
}