Add side panels, task selection, graph animation, and project docs

- Foldable left panel (user profile) and right panel (task details)
- Clicking a task in the list or graph node selects it and shows details
- Both views (task list + graph) always mounted via absolute inset-0 for
  correct canvas dimensions; tabs toggle visibility with opacity
- Graph node selection animation: other nodes repel outward (charge -600),
  then selected node smoothly slides to center (500ms cubic ease-out),
  then charge restores to -120 and graph stabilizes
- Graph re-fits on tab switch and panel resize via ResizeObserver
- Fix UUID string IDs throughout (backend returns UUIDs, not integers)
- Add TaskDetailPanel, UserPanel components
- Add CLAUDE.md project documentation

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Alvis
2026-04-08 11:23:06 +00:00
parent 5c7edd4bbc
commit f1d51b8cc8
23998 changed files with 3242708 additions and 0 deletions

27
backend/src/db.rs Normal file
View File

@@ -0,0 +1,27 @@
use anyhow::Result;
use sqlx::{sqlite::SqlitePoolOptions, SqlitePool};
/// Open a SQLite connection pool (capped at 5 connections) for `database_url`.
///
/// # Errors
/// Returns an error if the database cannot be opened/connected.
pub async fn create_pool(database_url: &str) -> Result<SqlitePool> {
    Ok(SqlitePoolOptions::new()
        .max_connections(5)
        .connect(database_url)
        .await?)
}
/// Create the `tasks` table if it does not already exist.
///
/// Idempotent (uses `CREATE TABLE IF NOT EXISTS`), so it is safe to run on
/// every startup.
///
/// # Errors
/// Returns an error if the DDL statement fails to execute.
pub async fn run_migrations(pool: &SqlitePool) -> Result<()> {
    // Schema note: `id` is a TEXT primary key (UUID strings are inserted by
    // the task routes) and `created_at` is Unix seconds.
    const CREATE_TASKS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS tasks (
id TEXT PRIMARY KEY NOT NULL,
title TEXT NOT NULL,
description TEXT,
completed BOOLEAN NOT NULL DEFAULT 0,
created_at INTEGER NOT NULL
)
"#;
    sqlx::query(CREATE_TASKS_TABLE).execute(pool).await?;
    Ok(())
}

117
backend/src/graph.rs Normal file
View File

@@ -0,0 +1,117 @@
use rand::{Rng, SeedableRng};
use rand::rngs::StdRng;
use crate::models::{GraphData, GraphEdge, GraphNode, Task};
/// Build graph data from a list of tasks.
/// Nodes = tasks, edges = ~30% of all pairs, deterministic seed from task IDs.
/// Build graph data from a list of tasks.
///
/// Nodes mirror the tasks one-to-one; edges connect roughly 30% of all task
/// pairs, chosen by a PRNG seeded deterministically from the concatenated
/// task IDs, so the same task set always produces the same graph.
pub fn build_graph(tasks: &[Task]) -> GraphData {
    let nodes = tasks
        .iter()
        .map(|task| GraphNode {
            id: task.id.clone(),
            label: task.title.clone(),
            completed: task.completed,
        })
        .collect();

    let mut edges = Vec::new();
    if tasks.len() >= 2 {
        // Polynomial (base-31) hash over every byte of every ID; equivalent
        // to hashing the IDs joined with "", so the seed is order-sensitive
        // but fully deterministic for a given task list.
        let seed = tasks
            .iter()
            .flat_map(|task| task.id.bytes())
            .fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u64));
        let mut rng = StdRng::seed_from_u64(seed);

        for (i, source) in tasks.iter().enumerate() {
            for target in &tasks[i + 1..] {
                // RNG call order is part of the contract: one f64 draw per
                // pair, plus one weight draw only for accepted edges.
                if rng.gen::<f64>() < 0.30 {
                    edges.push(GraphEdge {
                        source: source.id.clone(),
                        target: target.id.clone(),
                        weight: rng.gen_range(0.1f32..1.0f32),
                    });
                }
            }
        }
    }

    GraphData { nodes, edges }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Produce `n` synthetic tasks with stable ids ("task-0000", ...),
    /// alternating completion state.
    fn make_tasks(n: usize) -> Vec<Task> {
        let mut tasks = Vec::with_capacity(n);
        for i in 0..n {
            tasks.push(Task {
                id: format!("task-{i:04}"),
                title: format!("Task {i}"),
                description: None,
                completed: i % 2 == 0,
                created_at: i as i64,
            });
        }
        tasks
    }

    #[test]
    fn test_graph_node_count_matches_tasks() {
        let graph = build_graph(&make_tasks(10));
        assert_eq!(graph.nodes.len(), 10);
    }

    #[test]
    fn test_graph_edge_count_reasonable() {
        let graph = build_graph(&make_tasks(10));
        // 10 tasks => C(10, 2) = 45 possible pairs is a hard upper bound;
        // the exact count depends on the seeded RNG, so only the bound is
        // asserted here.
        assert!(graph.edges.len() <= 45);
    }

    #[test]
    fn test_graph_deterministic() {
        let tasks = make_tasks(8);
        let first = build_graph(&tasks);
        let second = build_graph(&tasks);
        assert_eq!(first.edges.len(), second.edges.len());
        for (a, b) in first.edges.iter().zip(second.edges.iter()) {
            assert_eq!(a.source, b.source);
            assert_eq!(a.target, b.target);
        }
    }

    #[test]
    fn test_empty_tasks() {
        let graph = build_graph(&[]);
        assert!(graph.nodes.is_empty());
        assert!(graph.edges.is_empty());
    }

    #[test]
    fn test_single_task_no_edges() {
        let graph = build_graph(&make_tasks(1));
        assert_eq!(graph.nodes.len(), 1);
        assert!(graph.edges.is_empty());
    }

    #[test]
    fn test_edge_weights_in_range() {
        for edge in &build_graph(&make_tasks(10)).edges {
            assert!((0.1..1.0).contains(&edge.weight));
        }
    }
}

4
backend/src/lib.rs Normal file
View File

@@ -0,0 +1,4 @@
//! Taskpile backend crate root: re-exports the backend's modules.

pub mod db; // SQLite pool creation and schema migration
pub mod graph; // deterministic task-graph construction from task lists
pub mod models; // serde/sqlx data types shared across routes
pub mod routes; // axum HTTP handlers (tasks CRUD + graph)

33
backend/src/main.rs Normal file
View File

@@ -0,0 +1,33 @@
use taskpile_backend::{db, routes};
use axum::{
routing::{delete, get, patch, post},
Router,
};
use tower_http::cors::{Any, CorsLayer};
/// Backend entry point: open the SQLite pool, run migrations, and serve the
/// task/graph API over HTTP.
///
/// Configuration (both optional, defaults match previous hard-coded values):
/// - `DATABASE_URL` — SQLite connection string (`mode=rwc` creates the file).
/// - `BIND_ADDR` — listen address, default `0.0.0.0:3001`.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let database_url = std::env::var("DATABASE_URL")
        .unwrap_or_else(|_| "sqlite:taskpile.db?mode=rwc".to_string());
    let pool = db::create_pool(&database_url).await?;
    db::run_migrations(&pool).await?;

    // NOTE(review): fully-open CORS is appropriate for local development
    // only; restrict `allow_origin` to the frontend origin for production.
    let cors = CorsLayer::new()
        .allow_origin(Any)
        .allow_methods(Any)
        .allow_headers(Any);

    let app = Router::new()
        .route("/api/tasks", get(routes::tasks::list_tasks))
        .route("/api/tasks", post(routes::tasks::create_task))
        .route("/api/tasks/:id", patch(routes::tasks::update_task))
        .route("/api/tasks/:id", delete(routes::tasks::delete_task))
        .route("/api/graph", get(routes::graph::get_graph))
        .layer(cors)
        .with_state(pool);

    let bind_addr =
        std::env::var("BIND_ADDR").unwrap_or_else(|_| "0.0.0.0:3001".to_string());
    let listener = tokio::net::TcpListener::bind(&bind_addr).await?;
    println!("Listening on http://{bind_addr}");
    axum::serve(listener, app).await?;
    Ok(())
}

77
backend/src/models.rs Normal file
View File

@@ -0,0 +1,77 @@
use serde::{Deserialize, Serialize};
/// A persisted task row (`tasks` table); also the JSON shape returned by the
/// task endpoints.
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct Task {
    // TEXT primary key; routes insert UUID strings (see create_task).
    pub id: String,
    pub title: String,
    // Optional free-form text; serialized as JSON `null` when absent.
    pub description: Option<String>,
    pub completed: bool,
    // Creation time as Unix seconds.
    pub created_at: i64,
}
/// A node in the task graph; mirrors one `Task`'s id, title, and done state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphNode {
    // Same value as Task::id, so the frontend can map nodes back to tasks.
    pub id: String,
    // Human-readable label (the task title).
    pub label: String,
    pub completed: bool,
}

/// An edge between two task nodes, generated by `graph::build_graph`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphEdge {
    // Endpoint node ids (see GraphNode::id).
    pub source: String,
    pub target: String,
    // Random weight in [0.1, 1.0) assigned at graph-build time.
    pub weight: f32,
}

/// Full graph payload returned by GET /api/graph.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphData {
    pub nodes: Vec<GraphNode>,
    pub edges: Vec<GraphEdge>,
}
/// Request body for POST /api/tasks.
#[derive(Debug, Deserialize)]
pub struct CreateTask {
    pub title: String,
    pub description: Option<String>,
}

/// Request body for PATCH /api/tasks/:id. Every field is optional; fields
/// left out keep their stored values.
#[derive(Debug, Deserialize)]
pub struct UpdateTask {
    pub title: Option<String>,
    // NOTE(review): update_task merges this with `Option::or`, so a client
    // can replace the description but never clear it back to null — confirm
    // this is the intended PATCH semantics.
    pub description: Option<String>,
    pub completed: Option<bool>,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_task_serialization() {
        // A Task must survive a JSON round-trip with its fields intact.
        let original = Task {
            id: "abc-123".to_string(),
            title: "Test task".to_string(),
            description: Some("A description".to_string()),
            completed: false,
            created_at: 1234567890,
        };
        let encoded = serde_json::to_string(&original).unwrap();
        let roundtrip: Task = serde_json::from_str(&encoded).unwrap();
        assert_eq!(roundtrip.id, original.id);
        assert_eq!(roundtrip.title, original.title);
        assert_eq!(roundtrip.completed, original.completed);
    }

    #[test]
    fn test_task_no_description() {
        // A missing description serializes as an explicit JSON null, which
        // the frontend relies on.
        let task = Task {
            id: "xyz".to_string(),
            title: "No desc".to_string(),
            description: None,
            completed: true,
            created_at: 0,
        };
        let encoded = serde_json::to_string(&task).unwrap();
        assert!(encoded.contains("\"description\":null"));
    }
}

View File

@@ -0,0 +1,17 @@
use axum::{extract::State, http::StatusCode, Json};
use sqlx::SqlitePool;
use crate::{graph::build_graph, models::{GraphData, Task}};
/// GET /api/graph — load every task (oldest first) and derive the node/edge
/// graph from them.
///
/// Returns 500 if the database query fails.
pub async fn get_graph(
    State(pool): State<SqlitePool>,
) -> Result<Json<GraphData>, StatusCode> {
    let query_result = sqlx::query_as::<_, Task>(
        "SELECT id, title, description, completed, created_at FROM tasks ORDER BY created_at ASC",
    )
    .fetch_all(&pool)
    .await;

    match query_result {
        Ok(tasks) => Ok(Json(build_graph(&tasks))),
        Err(_) => Err(StatusCode::INTERNAL_SERVER_ERROR),
    }
}

View File

@@ -0,0 +1,2 @@
//! HTTP route handlers, grouped per resource.

pub mod graph; // GET /api/graph
pub mod tasks; // /api/tasks CRUD handlers

113
backend/src/routes/tasks.rs Normal file
View File

@@ -0,0 +1,113 @@
use axum::{
extract::{Path, State},
http::StatusCode,
Json,
};
use sqlx::SqlitePool;
use std::time::{SystemTime, UNIX_EPOCH};
use uuid::Uuid;
use crate::models::{CreateTask, Task, UpdateTask};
pub async fn list_tasks(
State(pool): State<SqlitePool>,
) -> Result<Json<Vec<Task>>, StatusCode> {
let tasks = sqlx::query_as::<_, Task>(
"SELECT id, title, description, completed, created_at FROM tasks ORDER BY created_at ASC",
)
.fetch_all(&pool)
.await
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
Ok(Json(tasks))
}
/// POST /api/tasks — insert a new task with a server-generated UUID id and
/// the current time as `created_at`.
///
/// Returns 201 with the created task, or 500 on database failure.
///
/// # Panics
/// Panics if the system clock is set before the Unix epoch (a broken
/// environment, treated as an invariant violation rather than a 500).
pub async fn create_task(
    State(pool): State<SqlitePool>,
    Json(body): Json<CreateTask>,
) -> Result<(StatusCode, Json<Task>), StatusCode> {
    let id = Uuid::new_v4().to_string();
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set before the Unix epoch")
        .as_secs() as i64;
    sqlx::query(
        "INSERT INTO tasks (id, title, description, completed, created_at) VALUES (?, ?, ?, 0, ?)",
    )
    .bind(&id)
    .bind(&body.title)
    .bind(&body.description)
    .bind(now)
    .execute(&pool)
    .await
    .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    // Echo back the row we just inserted without a second round-trip.
    let task = Task {
        id,
        title: body.title,
        description: body.description,
        completed: false,
        created_at: now,
    };
    Ok((StatusCode::CREATED, Json(task)))
}
/// PATCH /api/tasks/:id — partial update: merge the provided fields into the
/// stored row and return the merged task.
///
/// Returns 404 if the id is unknown, 500 on database failure.
///
/// NOTE(review): `description` is merged with `Option::or`, so clients can
/// replace a description but never clear it back to null — confirm this is
/// the intended PATCH semantics.
pub async fn update_task(
    State(pool): State<SqlitePool>,
    Path(id): Path<String>,
    Json(body): Json<UpdateTask>,
) -> Result<Json<Task>, StatusCode> {
    // Load the current row so that unspecified fields keep their values.
    let current = sqlx::query_as::<_, Task>(
        "SELECT id, title, description, completed, created_at FROM tasks WHERE id = ?",
    )
    .bind(&id)
    .fetch_optional(&pool)
    .await
    .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?
    .ok_or(StatusCode::NOT_FOUND)?;

    // Merge the patch over the stored row; created_at is immutable.
    let merged = Task {
        id,
        title: body.title.unwrap_or(current.title),
        description: body.description.or(current.description),
        completed: body.completed.unwrap_or(current.completed),
        created_at: current.created_at,
    };

    sqlx::query(
        "UPDATE tasks SET title = ?, description = ?, completed = ? WHERE id = ?",
    )
    .bind(&merged.title)
    .bind(&merged.description)
    .bind(merged.completed)
    .bind(&merged.id)
    .execute(&pool)
    .await
    .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    Ok(Json(merged))
}
/// DELETE /api/tasks/:id — remove a task.
///
/// Returns 204 on success, 404 if no row matched, 500 on database failure.
pub async fn delete_task(
    State(pool): State<SqlitePool>,
    Path(id): Path<String>,
) -> Result<StatusCode, StatusCode> {
    let outcome = sqlx::query("DELETE FROM tasks WHERE id = ?")
        .bind(&id)
        .execute(&pool)
        .await
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    match outcome.rows_affected() {
        0 => Err(StatusCode::NOT_FOUND),
        _ => Ok(StatusCode::NO_CONTENT),
    }
}