From 6a16f4d51dffa87a8565d892c796db81fc13f5d2 Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Sun, 15 Mar 2026 18:22:17 +0100 Subject: [PATCH 01/10] fix(nav): canonicalize math routing and wire orphan pages into navigation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Navigation IA cleanup: math route consolidation and sidebar discoverability improvements across multiple sections. Math routing: - Add /math hub page (MathLandingView) linking to Refresh and Bridge - Add canonical routes /math/refresh and /math/bridge - Legacy /math-refresh and /math-bridge redirect with params preserved - Reference deep links untouched Sidebar wiring (Learning + Career): - Add Cognitive Toolkit and Behavioral Design to Learning section - Add Career Foundations to Career section - Extend Learning and Career header active states Sidebar wiring (Projects + Business): - Add OSS Projects and HF Projects to Projects section - Add Brand Studio and Execution Playbook to Business section - Extend parent header active states Section rename: - "Side Projects" → "Business" (label + aria only) Docs deep-dives: - Add 6 deep-dive sub-items under Docs > How It Works - Extend Docs header active state for deep-dive routes - Add mcp_ecosystem ViewId for /how-mcp-works - /how-gpu-works intentionally left in Reference --- frontend/src/components/Sidebar.tsx | 1263 +++++++++++++----------- frontend/src/router.tsx | 71 +- frontend/src/routes/math.tsx | 23 + frontend/src/views/MathLandingView.tsx | 64 ++ 4 files changed, 843 insertions(+), 578 deletions(-) create mode 100644 frontend/src/routes/math.tsx create mode 100644 frontend/src/views/MathLandingView.tsx diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index a521729..81d8b62 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -67,6 +67,8 @@ export type ViewId = | "product_lab" | 
"career" | "career_opportunities" + | "math_landing" + | "math_refresh_active" | "math_bridge" | "math_bridge_core_numeracy" | "math_bridge_high_school" @@ -144,6 +146,7 @@ export type ViewId = | "architecture_deep_dive_resilience" | "architecture_deep_dive_storage" | "architecture_deep_dive_stack" + | "mcp_ecosystem" | "changelog" | "design_system_components" | "design_system_coverage" @@ -1512,647 +1515,668 @@ export function Sidebar({ : url_tab === "build" ? "gpu_infra_build" : "gpu_infra_overview" - : pathname.startsWith("/changelog") - ? "changelog" - : pathname.startsWith( - "/design-system", - ) - ? url_tab === "coverage" - ? "design_system_coverage" - : "design_system_components" - : pathname.startsWith("/taxonomy") - ? url_tab === "skills" - ? "taxonomy_skills" - : url_tab === "domains" - ? "taxonomy_domains" - : url_tab === "composites" - ? "taxonomy_composites" - : url_tab === "roles" - ? "taxonomy_roles" - : "taxonomy_overview" + : pathname.startsWith( + "/how-mcp-works", + ) + ? "mcp_ecosystem" + : pathname.startsWith("/changelog") + ? "changelog" + : pathname.startsWith( + "/design-system", + ) + ? url_tab === "coverage" + ? "design_system_coverage" + : "design_system_components" : pathname.startsWith( - "/startup-challenge", + "/taxonomy", ) - ? "startup_challenge" + ? url_tab === "skills" + ? "taxonomy_skills" + : url_tab === "domains" + ? "taxonomy_domains" + : url_tab === "composites" + ? "taxonomy_composites" + : url_tab === "roles" + ? "taxonomy_roles" + : "taxonomy_overview" : pathname.startsWith( - "/sonic-dna", + "/startup-challenge", ) - ? "sonic_dna" + ? "startup_challenge" : pathname.startsWith( - "/chinese", + "/sonic-dna", ) - ? url_tab === "vocab" - ? "chinese_vocab" - : url_tab === "lessons" - ? "chinese_lessons" - : url_tab === "review" - ? "chinese_review" - : "chinese_dashboard" + ? "sonic_dna" : pathname.startsWith( - "/cantonese", + "/chinese", ) ? url_tab === "vocab" - ? "cantonese_vocab" + ? 
"chinese_vocab" : url_tab === "lessons" - ? "cantonese_lessons" + ? "chinese_lessons" : url_tab === "review" - ? "cantonese_review" - : "cantonese_dashboard" + ? "chinese_review" + : "chinese_dashboard" : pathname.startsWith( - "/module-48", + "/cantonese", ) - ? url_tab === "pnl" - ? "module48_pnl" + ? url_tab === "vocab" + ? "cantonese_vocab" : url_tab === - "pennylane" - ? "module48_pennylane" - : "module48_operations" + "lessons" + ? "cantonese_lessons" + : url_tab === + "review" + ? "cantonese_review" + : "cantonese_dashboard" : pathname.startsWith( - "/shopify-architect", - ) || - pathname.startsWith( - "/shopify", + "/module-48", ) - ? url_tab === - "skills" - ? "shopify_skills" - : "shopify_architect" + ? url_tab === "pnl" + ? "module48_pnl" + : url_tab === + "pennylane" + ? "module48_pennylane" + : "module48_operations" : pathname.startsWith( - "/linkedin-network", + "/shopify-architect", ) || pathname.startsWith( - "/linkedin-reset", - ) || - pathname.startsWith( - "/social", + "/shopify", ) ? url_tab === - "playbook" - ? "linkedin_playbook" - : url_tab === - "audit" - ? "linkedin_audit" - : "linkedin_network" + "skills" + ? "shopify_skills" + : "shopify_architect" : pathname.startsWith( - "/codex-productivity", + "/linkedin-network", + ) || + pathname.startsWith( + "/linkedin-reset", + ) || + pathname.startsWith( + "/social", ) - ? "codex_productivity" + ? url_tab === + "playbook" + ? "linkedin_playbook" + : url_tab === + "audit" + ? "linkedin_audit" + : "linkedin_network" : pathname.startsWith( - "/claude-productivity", + "/codex-productivity", ) - ? "claude_productivity" + ? "codex_productivity" : pathname.startsWith( - "/cowork-productivity", + "/claude-productivity", ) - ? "cowork_productivity" + ? "claude_productivity" : pathname.startsWith( - "/obsidian-productivity", + "/cowork-productivity", ) - ? "obsidian_productivity" + ? "cowork_productivity" : pathname.startsWith( - "/gmail-productivity", + "/obsidian-productivity", ) - ? 
"gmail_productivity" + ? "obsidian_productivity" : pathname.startsWith( - "/gemini-productivity", + "/gmail-productivity", ) - ? "gemini_productivity" + ? "gmail_productivity" : pathname.startsWith( - "/excel-productivity", + "/gemini-productivity", ) - ? "excel_productivity" + ? "gemini_productivity" : pathname.startsWith( - "/presentations-productivity", + "/excel-productivity", ) - ? "presentations_productivity" + ? "excel_productivity" : pathname.startsWith( - "/claude-video-productivity", + "/presentations-productivity", ) - ? "claude_video_productivity" + ? "presentations_productivity" : pathname.startsWith( - "/powerpoint-productivity", + "/claude-video-productivity", ) - ? "powerpoint_productivity" + ? "claude_video_productivity" : pathname.startsWith( - "/claude-frontend-productivity", + "/powerpoint-productivity", ) - ? "claude_frontend_productivity" + ? "powerpoint_productivity" : pathname.startsWith( - "/execution-playbook", - ) || - pathname.startsWith( - "/house-rules", + "/claude-frontend-productivity", ) - ? url_tab === - "prompt_tactics" - ? "execution_playbook_prompt_tactics" - : url_tab === - "parallel_ops" - ? "execution_playbook_parallel_ops" - : url_tab === - "remote_access" - ? "execution_playbook_remote_access" - : "execution_playbook_checklists" + ? "claude_frontend_productivity" : pathname.startsWith( - "/completion", + "/execution-playbook", + ) || + pathname.startsWith( + "/house-rules", ) - ? "completion" + ? url_tab === + "prompt_tactics" + ? "execution_playbook_prompt_tactics" + : url_tab === + "parallel_ops" + ? "execution_playbook_parallel_ops" + : url_tab === + "remote_access" + ? "execution_playbook_remote_access" + : "execution_playbook_checklists" : pathname.startsWith( - "/monitor", + "/completion", ) - ? url_tab === - "architecture" - ? "monitor_architecture" - : url_tab === - "evals" - ? "monitor_evals" + ? "completion" + : pathname.startsWith( + "/monitor", + ) + ? url_tab === + "architecture" + ? 
"monitor_architecture" : url_tab === - "test-cases" - ? "monitor_test_cases" + "evals" + ? "monitor_evals" : url_tab === - "databases" - ? "monitor_databases" + "test-cases" + ? "monitor_test_cases" : url_tab === - "costs" - ? "monitor_costs" + "databases" + ? "monitor_databases" : url_tab === - "tracing" - ? "monitor_tracing" + "costs" + ? "monitor_costs" : url_tab === - "plan-usage" - ? "monitor_plan_usage" + "tracing" + ? "monitor_tracing" : url_tab === - "playbook" - ? "monitor_playbook" + "plan-usage" + ? "monitor_plan_usage" : url_tab === - "codex-productivity" - ? "codex_productivity" + "playbook" + ? "monitor_playbook" : url_tab === - "claude-productivity" - ? "claude_productivity" + "codex-productivity" + ? "codex_productivity" : url_tab === - "cowork-productivity" - ? "cowork_productivity" - : "monitor_health" - : pathname.startsWith( - "/figma", - ) - ? "figma" + "claude-productivity" + ? "claude_productivity" + : url_tab === + "cowork-productivity" + ? "cowork_productivity" + : "monitor_health" : pathname.startsWith( - "/design-tools", + "/figma", ) - ? url_tab === - "tools" - ? "design_tools_tools" - : url_tab === - "compare" - ? "design_tools_compare" - : "design_tools_overview" + ? "figma" : pathname.startsWith( - "/career-accelerator", + "/design-tools", ) ? url_tab === - "opportunities" - ? "career_opportunities" - : "career" + "tools" + ? "design_tools_tools" + : url_tab === + "compare" + ? "design_tools_compare" + : "design_tools_overview" : pathname.startsWith( - "/math-bridge", + "/career-accelerator", ) ? url_tab === - "core_numeracy" - ? "math_bridge_core_numeracy" - : url_tab === - "high_school" - ? "math_bridge_high_school" - : url_tab === - "pre_university" - ? "math_bridge_pre_university" - : url_tab === - "engineering_prep" - ? "math_bridge_engineering_prep" - : "math_bridge" - : pathname.startsWith( - "/career-foundations", - ) - ? url_tab === - "msc_refresh" - ? "career_foundations_msc" - : url_tab === - "assessment" - ? 
"career_foundations_assessment" + "opportunities" + ? "career_opportunities" + : "career" + : pathname === + "/math" + ? "math_landing" + : pathname.startsWith( + "/math/bridge", + ) || + pathname.startsWith( + "/math-bridge", + ) + ? url_tab === + "core_numeracy" + ? "math_bridge_core_numeracy" : url_tab === - "ai_labs" - ? "career_foundations_ai_labs" + "high_school" + ? "math_bridge_high_school" : url_tab === - "forward_deployed" - ? "career_foundations_fde" + "pre_university" + ? "math_bridge_pre_university" : url_tab === - "cloud_consulting" - ? "career_foundations_cloud" + "engineering_prep" + ? "math_bridge_engineering_prep" + : "math_bridge" + : pathname.startsWith( + "/math/refresh", + ) || + pathname.startsWith( + "/math-refresh", + ) + ? "math_refresh_active" + : pathname.startsWith( + "/career-foundations", + ) + ? url_tab === + "msc_refresh" + ? "career_foundations_msc" + : url_tab === + "assessment" + ? "career_foundations_assessment" : url_tab === - "french_enterprise" - ? "career_foundations_french" + "ai_labs" + ? "career_foundations_ai_labs" : url_tab === - "adtech" - ? "career_foundations_adtech" + "forward_deployed" + ? "career_foundations_fde" : url_tab === - "faang" - ? "career_foundations_faang" + "cloud_consulting" + ? "career_foundations_cloud" : url_tab === - "data_platform" - ? "career_foundations_platform" + "french_enterprise" + ? "career_foundations_french" : url_tab === - "supply_chain" - ? "career_foundations_supply_chain" - : "career_foundations" - : pathname.startsWith( - "/high-performance", - ) - ? "high_performance" - : pathname.startsWith( - "/product-lab", - ) - ? "product_lab" - : pathname.startsWith( - "/apps", - ) - ? "my_apps" + "adtech" + ? "career_foundations_adtech" + : url_tab === + "faang" + ? "career_foundations_faang" + : url_tab === + "data_platform" + ? "career_foundations_platform" + : url_tab === + "supply_chain" + ? 
"career_foundations_supply_chain" + : "career_foundations" : pathname.startsWith( - "/brand-studio", + "/high-performance", ) - ? url_tab === - "collections" - ? "brand_studio_collections" - : "brand_studio_assets" + ? "high_performance" : pathname.startsWith( - "/events", + "/product-lab", ) - ? "events" + ? "product_lab" : pathname.startsWith( - "/reference-tracks", + "/apps", ) - ? "reference_tracks" + ? "my_apps" : pathname.startsWith( - "/oss-projects", + "/brand-studio", ) - ? "oss_projects" + ? url_tab === + "collections" + ? "brand_studio_collections" + : "brand_studio_assets" : pathname.startsWith( - "/hf-projects", + "/events", ) - ? "hf_projects" + ? "events" : pathname.startsWith( - "/elite-toolbox", + "/reference-tracks", ) - ? "elite_toolbox" + ? "reference_tracks" : pathname.startsWith( - "/dev-ref", - ) - ? get_dev_ref_view( - url_tab, + "/oss-projects", ) + ? "oss_projects" : pathname.startsWith( - "/prep", - ) - ? get_prep_view( - url_tab, + "/hf-projects", ) + ? "hf_projects" : pathname.startsWith( - "/applied-systems", + "/elite-toolbox", ) - ? url_tab === - "dataops" - ? "applied_systems_dataops" - : url_tab === - "recsys" - ? "applied_systems_recsys" - : url_tab === - "evals" - ? "applied_systems_evals" - : url_tab === - "worldmodels" - ? "applied_systems_worldmodels" - : url_tab === - "3d_vision" - ? "applied_systems_3d_vision" - : url_tab === - "distributed_ml" - ? "applied_systems_distributed_ml" - : "applied_systems_llmops" + ? "elite_toolbox" : pathname.startsWith( - "/embodied-ai", + "/dev-ref", + ) + ? get_dev_ref_view( + url_tab, ) - ? url_tab === - "humanoid" - ? "embodied_ai_humanoid" - : url_tab === - "service" - ? "embodied_ai_service" - : url_tab === - "autonomous" - ? "embodied_ai_autonomous" - : url_tab === - "agentic" - ? "embodied_ai_agentic" - : url_tab === - "edge_inference" - ? "embodied_ai_edge_inference" - : url_tab === - "world_models" - ? 
"embodied_ai_world_models" - : "embodied_ai_core" : pathname.startsWith( - "/ai-engineering", + "/prep", + ) + ? get_prep_view( + url_tab, ) - ? url_tab === - "agents" - ? "ai_engineering_agents" - : url_tab === - "evals" - ? "ai_engineering_evals" - : url_tab === - "retrieval" - ? "ai_engineering_retrieval" - : url_tab === - "memory" - ? "ai_engineering_memory" - : url_tab === - "fine_tuning" - ? "ai_engineering_fine_tuning" - : url_tab === - "multimodal" - ? "ai_engineering_multimodal" - : url_tab === - "reasoning" - ? "ai_engineering_reasoning" - : "ai_engineering_inference" : pathname.startsWith( - "/frontend-eng", + "/applied-systems", ) ? url_tab === - "components" - ? "frontend_eng_components" + "dataops" + ? "applied_systems_dataops" : url_tab === - "data_layer" - ? "frontend_eng_data_layer" + "recsys" + ? "applied_systems_recsys" : url_tab === - "performance" - ? "frontend_eng_performance" + "evals" + ? "applied_systems_evals" : url_tab === - "typescript" - ? "frontend_eng_typescript" + "worldmodels" + ? "applied_systems_worldmodels" : url_tab === - "testing" - ? "frontend_eng_testing" + "3d_vision" + ? "applied_systems_3d_vision" : url_tab === - "architecture" - ? "frontend_eng_architecture" - : "frontend_eng_state" + "distributed_ml" + ? "applied_systems_distributed_ml" + : "applied_systems_llmops" : pathname.startsWith( - "/agents", + "/embodied-ai", ) ? url_tab === - "resources" - ? "agents_resources" - : "agents_roadmap" + "humanoid" + ? "embodied_ai_humanoid" + : url_tab === + "service" + ? "embodied_ai_service" + : url_tab === + "autonomous" + ? "embodied_ai_autonomous" + : url_tab === + "agentic" + ? "embodied_ai_agentic" + : url_tab === + "edge_inference" + ? "embodied_ai_edge_inference" + : url_tab === + "world_models" + ? "embodied_ai_world_models" + : "embodied_ai_core" : pathname.startsWith( - "/tech-radar", + "/ai-engineering", ) ? url_tab === - "blogs" - ? "tech_radar_blogs" + "agents" + ? "ai_engineering_agents" : url_tab === - "tools" - ? 
"tech_radar_tools" - : "tech_radar_strategy" + "evals" + ? "ai_engineering_evals" + : url_tab === + "retrieval" + ? "ai_engineering_retrieval" + : url_tab === + "memory" + ? "ai_engineering_memory" + : url_tab === + "fine_tuning" + ? "ai_engineering_fine_tuning" + : url_tab === + "multimodal" + ? "ai_engineering_multimodal" + : url_tab === + "reasoning" + ? "ai_engineering_reasoning" + : "ai_engineering_inference" : pathname.startsWith( - "/mcp", + "/frontend-eng", ) ? url_tab === - "docs" - ? "mcp_docs" - : "mcp_dashboard" + "components" + ? "frontend_eng_components" + : url_tab === + "data_layer" + ? "frontend_eng_data_layer" + : url_tab === + "performance" + ? "frontend_eng_performance" + : url_tab === + "typescript" + ? "frontend_eng_typescript" + : url_tab === + "testing" + ? "frontend_eng_testing" + : url_tab === + "architecture" + ? "frontend_eng_architecture" + : "frontend_eng_state" : pathname.startsWith( - "/skills", + "/agents", ) - ? "skills" + ? url_tab === + "resources" + ? "agents_resources" + : "agents_roadmap" : pathname.startsWith( - "/gpu-for-ai", + "/tech-radar", ) ? url_tab === - "cuda" - ? "gpu_for_ai_cuda" + "blogs" + ? "tech_radar_blogs" : url_tab === - "distributed" - ? "gpu_for_ai_distributed" - : url_tab === - "memory" - ? "gpu_for_ai_memory" - : url_tab === - "cloud" - ? "gpu_for_ai_cloud" - : url_tab === - "profiling" - ? "gpu_for_ai_profiling" - : url_tab === - "networking" - ? "gpu_for_ai_networking" - : url_tab === - "alternatives" - ? "gpu_for_ai_alternatives" - : "gpu_for_ai_architecture" + "tools" + ? "tech_radar_tools" + : "tech_radar_strategy" : pathname.startsWith( - "/bio-augmentation", + "/mcp", ) ? url_tab === - "neurotech" - ? "bio_augmentation_neurotech" - : url_tab === - "wearables" - ? "bio_augmentation_wearables" - : url_tab === - "biohacking" - ? "bio_augmentation_biohacking" - : url_tab === - "translation" - ? "bio_augmentation_translation" - : url_tab === - "convergence" - ? 
"bio_augmentation_convergence" - : "bio_augmentation_foundations" + "docs" + ? "mcp_docs" + : "mcp_dashboard" : pathname.startsWith( - "/math-refresh", + "/skills", ) - ? url_track === - "prepa_ml" + ? "skills" + : pathname.startsWith( + "/gpu-for-ai", + ) ? url_tab === - "linear_algebra" - ? "math_refresh_pml_linear_algebra" + "cuda" + ? "gpu_for_ai_cuda" : url_tab === - "analysis" - ? "math_refresh_pml_analysis" + "distributed" + ? "gpu_for_ai_distributed" : url_tab === - "probability" - ? "math_refresh_pml_probability" + "memory" + ? "gpu_for_ai_memory" : url_tab === - "applied_ml" - ? "math_refresh_pml_applied_ml" + "cloud" + ? "gpu_for_ai_cloud" : url_tab === - "geometry_3d" - ? "math_refresh_pml_geometry_3d" + "profiling" + ? "gpu_for_ai_profiling" : url_tab === - "dynamics_physics" - ? "math_refresh_pml_dynamics_physics" + "networking" + ? "gpu_for_ai_networking" : url_tab === - "evaluation" - ? "math_refresh_pml_evaluation" - : "math_refresh_pml_methode" - : url_tab === - "diagnostic" - ? "math_refresh_z2o_diagnostic" - : url_tab === - "college" - ? "math_refresh_z2o_college" - : url_tab === - "lycee" - ? "math_refresh_z2o_lycee" - : url_tab === - "terminale" - ? "math_refresh_z2o_terminale" - : url_tab === - "evaluation" - ? "math_refresh_z2o_evaluation" - : "math_refresh_z2o_methode" - : pathname.startsWith( - "/culture-generale", - ) - ? url_track === - "humanites" - ? url_tab === - "french_philo" - ? "culture_generale_humanites_french_philo" - : url_tab === - "literature" - ? "culture_generale_humanites_literature" - : "culture_generale_humanites_philo_science" - : url_track === - "sciences_sociales" - ? url_tab === - "political_philo" - ? "culture_generale_sciences_sociales_political_philo" - : url_tab === - "history" - ? "culture_generale_sciences_sociales_history" - : "culture_generale_sciences_sociales_economics" - : url_tab === - "information_theory" - ? "culture_generale_sciences_information_theory" - : url_tab === - "biology_neuro" - ? 
"culture_generale_sciences_biology_neuro" - : "culture_generale_sciences_physics" + "alternatives" + ? "gpu_for_ai_alternatives" + : "gpu_for_ai_architecture" : pathname.startsWith( - "/cognitive-toolkit", + "/bio-augmentation", ) ? url_tab === - "operating_system" - ? "cognitive_toolkit_operating_system" + "neurotech" + ? "bio_augmentation_neurotech" : url_tab === - "techniques" - ? "cognitive_toolkit_techniques" + "wearables" + ? "bio_augmentation_wearables" : url_tab === - "worldview" - ? "cognitive_toolkit_worldview" + "biohacking" + ? "bio_augmentation_biohacking" : url_tab === - "library" - ? "cognitive_toolkit_library" + "translation" + ? "bio_augmentation_translation" : url_tab === - "playbook" - ? "cognitive_toolkit_playbook" + "convergence" + ? "bio_augmentation_convergence" + : "bio_augmentation_foundations" + : pathname.startsWith( + "/math-refresh", + ) + ? url_track === + "prepa_ml" + ? url_tab === + "linear_algebra" + ? "math_refresh_pml_linear_algebra" + : url_tab === + "analysis" + ? "math_refresh_pml_analysis" + : url_tab === + "probability" + ? "math_refresh_pml_probability" : url_tab === - "operators" - ? "cognitive_toolkit_operators" + "applied_ml" + ? "math_refresh_pml_applied_ml" : url_tab === - "social_dynamics" - ? "cognitive_toolkit_social_dynamics" + "geometry_3d" + ? "math_refresh_pml_geometry_3d" : url_tab === - "ai_leverage" - ? "cognitive_toolkit_ai_leverage" - : "cognitive_toolkit_foundation" - : pathname.startsWith( - "/behavioral-design", - ) - ? url_tab === - "feed_design" - ? "behavioral_design_feed_design" + "dynamics_physics" + ? "math_refresh_pml_dynamics_physics" + : url_tab === + "evaluation" + ? "math_refresh_pml_evaluation" + : "math_refresh_pml_methode" : url_tab === - "social_loops" - ? "behavioral_design_social_loops" + "diagnostic" + ? "math_refresh_z2o_diagnostic" : url_tab === - "variable_rewards" - ? "behavioral_design_variable_rewards" + "college" + ? "math_refresh_z2o_college" : url_tab === - "friction" - ? 
"behavioral_design_friction" + "lycee" + ? "math_refresh_z2o_lycee" : url_tab === - "notifications" - ? "behavioral_design_notifications" + "terminale" + ? "math_refresh_z2o_terminale" : url_tab === - "gamification" - ? "behavioral_design_gamification" - : url_tab === - "case_studies" - ? "behavioral_design_case_studies" - : "behavioral_design_frameworks" + "evaluation" + ? "math_refresh_z2o_evaluation" + : "math_refresh_z2o_methode" : pathname.startsWith( - "/elite-freelance", + "/culture-generale", ) - ? url_tab === - "realtime_systems" - ? "elite_freelance_realtime_systems" - : url_tab === - "apis_at_scale" - ? "elite_freelance_apis_at_scale" + ? url_track === + "humanites" + ? url_tab === + "french_philo" + ? "culture_generale_humanites_french_philo" : url_tab === - "ai_agent_infra" - ? "elite_freelance_ai_agent_infra" + "literature" + ? "culture_generale_humanites_literature" + : "culture_generale_humanites_philo_science" + : url_track === + "sciences_sociales" + ? url_tab === + "political_philo" + ? "culture_generale_sciences_sociales_political_philo" : url_tab === - "production_hardening" - ? "elite_freelance_production_hardening" - : url_tab === - "positioning" - ? "elite_freelance_positioning" - : "elite_freelance_realtime_systems" + "history" + ? "culture_generale_sciences_sociales_history" + : "culture_generale_sciences_sociales_economics" + : url_tab === + "information_theory" + ? "culture_generale_sciences_information_theory" + : url_tab === + "biology_neuro" + ? "culture_generale_sciences_biology_neuro" + : "culture_generale_sciences_physics" : pathname.startsWith( - "/tooling", + "/cognitive-toolkit", ) - ? "tooling" - : pathname === - "/learn" - ? "learn_concepts" - : pathname === - "/learn/tracks" - ? "curriculum_tracks" - : pathname.match( - /^\/learn\/tracks\/[^/]+\/modules\//, + ? url_tab === + "operating_system" + ? "cognitive_toolkit_operating_system" + : url_tab === + "techniques" + ? 
"cognitive_toolkit_techniques" + : url_tab === + "worldview" + ? "cognitive_toolkit_worldview" + : url_tab === + "library" + ? "cognitive_toolkit_library" + : url_tab === + "playbook" + ? "cognitive_toolkit_playbook" + : url_tab === + "operators" + ? "cognitive_toolkit_operators" + : url_tab === + "social_dynamics" + ? "cognitive_toolkit_social_dynamics" + : url_tab === + "ai_leverage" + ? "cognitive_toolkit_ai_leverage" + : "cognitive_toolkit_foundation" + : pathname.startsWith( + "/behavioral-design", + ) + ? url_tab === + "feed_design" + ? "behavioral_design_feed_design" + : url_tab === + "social_loops" + ? "behavioral_design_social_loops" + : url_tab === + "variable_rewards" + ? "behavioral_design_variable_rewards" + : url_tab === + "friction" + ? "behavioral_design_friction" + : url_tab === + "notifications" + ? "behavioral_design_notifications" + : url_tab === + "gamification" + ? "behavioral_design_gamification" + : url_tab === + "case_studies" + ? "behavioral_design_case_studies" + : "behavioral_design_frameworks" + : pathname.startsWith( + "/elite-freelance", + ) + ? url_tab === + "realtime_systems" + ? "elite_freelance_realtime_systems" + : url_tab === + "apis_at_scale" + ? "elite_freelance_apis_at_scale" + : url_tab === + "ai_agent_infra" + ? "elite_freelance_ai_agent_infra" + : url_tab === + "production_hardening" + ? "elite_freelance_production_hardening" + : url_tab === + "positioning" + ? "elite_freelance_positioning" + : "elite_freelance_realtime_systems" + : pathname.startsWith( + "/tooling", ) - ? "curriculum_module_detail" - : pathname.match( - /^\/learn\/tracks\/[^/]+$/, - ) - ? "curriculum_track_detail" + ? "tooling" + : pathname === + "/learn" + ? "learn_concepts" : pathname === - "/learn/lenses" - ? "learn_lenses" - : pathname === - "/learn/levels" - ? "learn_levels" - : pathname.startsWith( - "/learn/", + "/learn/tracks" + ? "curriculum_tracks" + : pathname.match( + /^\/learn\/tracks\/[^/]+\/modules\//, + ) + ? 
"curriculum_module_detail" + : pathname.match( + /^\/learn\/tracks\/[^/]+$/, ) - ? "learn_concept_detail" - : "none"; + ? "curriculum_track_detail" + : pathname === + "/learn/lenses" + ? "learn_lenses" + : pathname === + "/learn/levels" + ? "learn_levels" + : pathname.startsWith( + "/learn/", + ) + ? "learn_concept_detail" + : "none"; const current_conversation_id = active_view === "chat" ? (params.conversationId ?? null) : null; @@ -2384,6 +2408,104 @@ export function Sidebar({ onClick={() => navigate({ to: "/chat" })} className="nav-accent-glow-red" /> + {/* Learning */} +
+
+ } + label="Learning" + active={ + active_view.startsWith("learn_") || + active_view.startsWith("curriculum_") || + active_view.startsWith("chinese_") || + active_view.startsWith("cantonese_") || + active_view.startsWith("math_") || + active_view.startsWith("culture_generale_") || + active_view.startsWith("cognitive_toolkit_") || + active_view.startsWith("behavioral_design_") + } + onClick={() => navigate({ to: "/learn/tracks" })} + /> +
+ {sidebarExpanded && ( + toggle_expandable_nav("learning")} + className="p-1 text-[var(--sb-sidebar-text-muted)] hover:text-[var(--sb-text-primary)] transition-colors" + label={ + expandable_nav.learning + ? "Minimize learning tabs" + : "Expand learning tabs" + } + tooltipSide="right" + > + {expandable_nav.learning ? ( + + ) : ( + + )} + + )} +
+ + navigate({ to: "/learn/tracks" })} + /> + navigate({ to: "/learn" })} + /> + navigate({ to: "/learn/lenses" })} + /> + navigate({ to: "/learn/levels" })} + /> + navigate({ to: "/chinese" })} + /> + navigate({ to: "/cantonese" })} + /> + navigate({ to: "/math" })} + /> + navigate({ to: "/culture-generale" })} + /> + navigate({ to: "/cognitive-toolkit" })} + /> + navigate({ to: "/behavioral-design" })} + /> +
navigate({ to: "/career-accelerator" })} /> @@ -2456,6 +2579,12 @@ export function Sidebar({ }) } /> + } + label="Foundations" + active={active_view.startsWith("career_foundations")} + onClick={() => navigate({ to: "/career-foundations" })} + />
@@ -2463,7 +2592,10 @@ export function Sidebar({ icon={} label="Projects" active={ - active_view === "projects" || active_view === "projects_ai" + active_view === "projects" || + active_view === "projects_ai" || + active_view === "oss_projects" || + active_view === "hf_projects" } onClick={() => navigate({ to: "/projects/enterprise-projects" }) @@ -2504,6 +2636,18 @@ export function Sidebar({ navigate({ to: "/projects/ai-generated-projects" }) } /> + } + label="OSS Projects" + active={active_view === "oss_projects"} + onClick={() => navigate({ to: "/oss-projects" })} + /> + } + label="HF Projects" + active={active_view === "hf_projects"} + onClick={() => navigate({ to: "/hf-projects" })} + />
@@ -3977,87 +4121,6 @@ export function Sidebar({ )} - {/* Learning */} -
-
- } - label="Learning" - active={ - active_view.startsWith("learn_") || - active_view.startsWith("curriculum_") || - active_view.startsWith("chinese_") || - active_view.startsWith("cantonese_") || - active_view.startsWith("math_refresh_") || - active_view.startsWith("culture_generale_") - } - onClick={() => navigate({ to: "/learn/tracks" })} - /> -
- {sidebarExpanded && ( - toggle_expandable_nav("learning")} - className="p-1 text-[var(--sb-sidebar-text-muted)] hover:text-[var(--sb-text-primary)] transition-colors" - label={ - expandable_nav.learning - ? "Minimize learning tabs" - : "Expand learning tabs" - } - tooltipSide="right" - > - {expandable_nav.learning ? ( - - ) : ( - - )} - - )} -
- - navigate({ to: "/learn/tracks" })} - /> - navigate({ to: "/learn" })} - /> - navigate({ to: "/learn/lenses" })} - /> - navigate({ to: "/learn/levels" })} - /> - navigate({ to: "/chinese" })} - /> - navigate({ to: "/cantonese" })} - /> - navigate({ to: "/math-refresh" })} - /> - navigate({ to: "/culture-generale" })} - /> - {/* Personal */}
@@ -4162,19 +4225,21 @@ export function Sidebar({ } /> - {/* Side Projects */} + {/* Business */}
} - label="Side Projects" + label="Business" active={ active_view.startsWith("module48_") || active_view.startsWith("linkedin_") || active_view === "shopify_architect" || active_view === "shopify_skills" || active_view === "sonic_dna" || - active_view === "reference_tracks" + active_view === "reference_tracks" || + active_view.startsWith("brand_studio") || + active_view.startsWith("execution_playbook") } onClick={() => navigate({ to: "/module-48" })} /> @@ -4185,8 +4250,8 @@ export function Sidebar({ className="p-1 text-[var(--sb-sidebar-text-muted)] hover:text-[var(--sb-text-primary)] transition-colors" label={ expandable_nav.side_projects - ? "Minimize side projects tabs" - : "Expand side projects tabs" + ? "Minimize business tabs" + : "Expand business tabs" } tooltipSide="right" > @@ -4230,6 +4295,18 @@ export function Sidebar({ active={active_view === "reference_tracks"} onClick={() => navigate({ to: "/reference-tracks" })} /> + } + label="Brand Studio" + active={active_view.startsWith("brand_studio")} + onClick={() => navigate({ to: "/brand-studio" })} + /> + } + label="Execution Playbook" + active={active_view.startsWith("execution_playbook")} + onClick={() => navigate({ to: "/execution-playbook" })} + />
@@ -4451,7 +4528,13 @@ export function Sidebar({ active_view === "how_it_works_architecture" || active_view === "how_it_works_contracts" || active_view === "how_it_works_event_driven" || - active_view === "how_it_works_models" + active_view === "how_it_works_models" || + active_view.startsWith("ingestion_pipeline_") || + active_view.startsWith("chat_pipeline_") || + active_view.startsWith("career_intelligence_") || + active_view.startsWith("observability_") || + active_view.startsWith("architecture_deep_dive_") || + active_view === "mcp_ecosystem" } onClick={() => navigate({ to: "/how-it-works", search: { tab: "overview" } }) @@ -4532,6 +4615,42 @@ export function Sidebar({ }) } /> + } + label="Ingestion Pipeline" + active={active_view.startsWith("ingestion_pipeline_")} + onClick={() => navigate({ to: "/how-ingestion-works" })} + /> + } + label="Chat Pipeline" + active={active_view.startsWith("chat_pipeline_")} + onClick={() => navigate({ to: "/how-chat-works" })} + /> + } + label="Career Intelligence" + active={active_view.startsWith("career_intelligence_")} + onClick={() => navigate({ to: "/how-career-works" })} + /> + } + label="Observability" + active={active_view.startsWith("observability_")} + onClick={() => navigate({ to: "/how-monitor-works" })} + /> + } + label="Architecture Reference" + active={active_view.startsWith("architecture_deep_dive_")} + onClick={() => navigate({ to: "/how-architecture-works" })} + /> + } + label="MCP Ecosystem" + active={active_view === "mcp_ecosystem"} + onClick={() => navigate({ to: "/how-mcp-works" })} + /> diff --git a/frontend/src/router.tsx b/frontend/src/router.tsx index dae15a8..fc35e35 100644 --- a/frontend/src/router.tsx +++ b/frontend/src/router.tsx @@ -93,6 +93,7 @@ import { HowGpuWorksRoute } from "./routes/how-gpu-works"; import { PricingRoute } from "./routes/pricing"; import { CareerFoundationsRoute } from "./routes/career-foundations"; import { MathBridgeRoute } from "./routes/math-bridge"; +import { 
MathLandingRoute } from "./routes/math"; export type CareerTab = "accelerator" | "opportunities"; @@ -1939,9 +1940,19 @@ const VALID_CULTURE_GENERALE_TABS = new Set([ "history", ]); -const mathRefreshRoute = createRoute({ +/* ── Canonical /math hub ──────────────────────────────────────────── */ +const mathRoute = createRoute({ getParentRoute: () => appRoute, - path: "/math-refresh", + path: "/math", + errorComponent: ({ error, reset }) => ( + + ), + component: MathLandingRoute, +}); + +const mathRefreshCanonicalRoute = createRoute({ + getParentRoute: () => appRoute, + path: "/math/refresh", validateSearch: ( search: Record, ): MathRefreshSearchParams => { @@ -1966,6 +1977,52 @@ const mathRefreshRoute = createRoute({ component: MathRefreshRoute, }); +const mathBridgeCanonicalRoute = createRoute({ + getParentRoute: () => appRoute, + path: "/math/bridge", + validateSearch: (search: Record): MathBridgeSearchParams => { + const result: MathBridgeSearchParams = {}; + if ( + typeof search.tab === "string" && + VALID_MATH_BRIDGE_TABS.has(search.tab) + ) { + result.tab = search.tab as MathBridgeTab; + } + return result; + }, + errorComponent: ({ error, reset }) => ( + + ), + component: MathBridgeRoute, +}); + +/* ── Legacy redirects ────────────────────────────────────────────── */ +const mathRefreshRoute = createRoute({ + getParentRoute: () => appRoute, + path: "/math-refresh", + validateSearch: ( + search: Record, + ): MathRefreshSearchParams => { + const result: MathRefreshSearchParams = {}; + if ( + typeof search.track === "string" && + VALID_MATH_REFRESH_TRACKS.has(search.track) + ) { + result.track = search.track as MathRefreshTrack; + } + if ( + typeof search.tab === "string" && + VALID_MATH_REFRESH_TABS.has(search.tab) + ) { + result.tab = search.tab as MathRefreshTab; + } + return result; + }, + beforeLoad: ({ search }) => { + throw redirect({ to: "/math/refresh", search, replace: true }); + }, +}); + const cultureGeneraleRoute = createRoute({ getParentRoute: () 
=> appRoute, path: "/culture-generale", @@ -2117,10 +2174,9 @@ const mathBridgeRoute = createRoute({ } return result; }, - errorComponent: ({ error, reset }) => ( - - ), - component: MathBridgeRoute, + beforeLoad: ({ search }) => { + throw redirect({ to: "/math/bridge", search, replace: true }); + }, }); const careerFoundationsRoute = createRoute({ @@ -2402,6 +2458,9 @@ const routeTree = rootRoute.addChildren([ harnessDevToolsRoute, gpuForAIRoute, bioAugmentationRoute, + mathRoute, + mathRefreshCanonicalRoute, + mathBridgeCanonicalRoute, mathRefreshRoute, cultureGeneraleRoute, cognitiveToolkitRoute, diff --git a/frontend/src/routes/math.tsx b/frontend/src/routes/math.tsx new file mode 100644 index 0000000..34b813a --- /dev/null +++ b/frontend/src/routes/math.tsx @@ -0,0 +1,23 @@ +import { lazy, Suspense } from "react"; + +const MathLandingView = lazy(() => + import("../views/MathLandingView").then((m) => ({ + default: m.MathLandingView, + })), +); + +function LoadingSpinner() { + return ( +
+
+
+ ); +} + +export function MathLandingRoute() { + return ( + }> + + + ); +} diff --git a/frontend/src/views/MathLandingView.tsx b/frontend/src/views/MathLandingView.tsx new file mode 100644 index 0000000..3331f7a --- /dev/null +++ b/frontend/src/views/MathLandingView.tsx @@ -0,0 +1,64 @@ +import { useNavigate } from "@tanstack/react-router"; +import { motion } from "framer-motion"; + +import { PremiumHero, PremiumPage } from "../components/layout/PremiumShell"; +import { useDocumentTitle } from "../hooks/useDocumentTitle"; + +const ENTRIES = [ + { + key: "refresh", + label: "Math Refresh", + description: + "Study the French math curriculum at your own pace - from arithmetic fundamentals through prepa-level linear algebra and probability.", + to: "/math/refresh" as const, + color: "#55cdff", + }, + { + key: "bridge", + label: "Math Bridge Program", + description: + "Test your skills across four levels, track your readiness, and close gaps with interactive micro-checks.", + to: "/math/bridge" as const, + color: "#ffc47c", + }, +] as const; + +export function MathLandingView() { + useDocumentTitle("Math"); + const navigate = useNavigate(); + + return ( + + +
+ {ENTRIES.map((entry, i) => ( + navigate({ to: entry.to })} + className="text-left rounded-[12px] border border-white/[0.08] bg-white/[0.02] p-6 transition-colors duration-150 hover:border-white/[0.12] hover:bg-white/[0.035]" + style={{ borderLeftWidth: 2, borderLeftColor: entry.color }} + > +

+ {entry.label} +

+

+ {entry.description} +

+
+ ))} +
+
+ ); +} From 82caf24700f5f2f5053468c181c51df877099583 Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Sun, 15 Mar 2026 18:36:04 +0100 Subject: [PATCH 02/10] fix(nav): freeze left sidebar information architecture for v1 Separate Core (Ingest/Library/Chat) from Learning/Career/Projects/Organizer with a thin divider. Separate Mission Control from Reference/Personal/Business with a divider after Workspaces. Add Business section label for clarity. --- frontend/src/components/Sidebar.tsx | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 81d8b62..92beabb 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -2408,6 +2408,9 @@ export function Sidebar({ onClick={() => navigate({ to: "/chat" })} className="nav-accent-glow-red" /> + +
+ {/* Learning */}
@@ -2892,6 +2895,9 @@ export function Sidebar({ } /> + +
+ {/* Reference (collapsed by default) */}
@@ -4226,6 +4232,7 @@ export function Sidebar({ /> {/* Business */} + {sidebarExpanded && }
Date: Mon, 16 Mar 2026 13:42:32 +0100 Subject: [PATCH 03/10] fix(math): make /math the canonical navigator for both curriculum tracks Enhance MathLandingView to show inline navigation for Zero to One (6 topics) and Prepa ML (8 topics) as clickable pills that link directly to /math/refresh?track=...&tab=... No sidebar changes. Math Bridge card kept at bottom. --- frontend/src/views/MathLandingView.tsx | 138 ++++++++++++++++++++----- 1 file changed, 111 insertions(+), 27 deletions(-) diff --git a/frontend/src/views/MathLandingView.tsx b/frontend/src/views/MathLandingView.tsx index 3331f7a..d9026e6 100644 --- a/frontend/src/views/MathLandingView.tsx +++ b/frontend/src/views/MathLandingView.tsx @@ -1,28 +1,57 @@ import { useNavigate } from "@tanstack/react-router"; import { motion } from "framer-motion"; +import { ChevronRight } from "lucide-react"; import { PremiumHero, PremiumPage } from "../components/layout/PremiumShell"; import { useDocumentTitle } from "../hooks/useDocumentTitle"; -const ENTRIES = [ +/* ------------------------------------------------------------------ */ +/* Track topic data */ +/* ------------------------------------------------------------------ */ + +const ZERO_TO_ONE_TOPICS = [ + { tab: "methode", label: "Methode" }, + { tab: "diagnostic", label: "Diagnostic" }, + { tab: "college", label: "College (6e-3e)" }, + { tab: "lycee", label: "Seconde-Premiere" }, + { tab: "terminale", label: "Terminale S" }, + { tab: "evaluation", label: "Auto-evaluation" }, +] as const; + +const PREPA_ML_TOPICS = [ + { tab: "methode", label: "Methode" }, + { tab: "linear_algebra", label: "Algebre Lineaire" }, + { tab: "analysis", label: "Analyse" }, + { tab: "probability", label: "Probabilites & Stats" }, + { tab: "applied_ml", label: "Maths Appliquees ML" }, + { tab: "geometry_3d", label: "Geometrie & 3D" }, + { tab: "dynamics_physics", label: "Dynamique & Physique" }, + { tab: "evaluation", label: "Auto-evaluation" }, +] as const; + +const TRACKS = [ { - key: 
"refresh", - label: "Math Refresh", + key: "zero_to_one", + title: "Maths - Zero to One", description: - "Study the French math curriculum at your own pace - from arithmetic fundamentals through prepa-level linear algebra and probability.", - to: "/math/refresh" as const, + "French curriculum from arithmetic through Terminale S - build fluency from scratch.", color: "#55cdff", + topics: ZERO_TO_ONE_TOPICS, }, { - key: "bridge", - label: "Math Bridge Program", + key: "prepa_ml", + title: "Maths - Prepa ML", description: - "Test your skills across four levels, track your readiness, and close gaps with interactive micro-checks.", - to: "/math/bridge" as const, + "University-level math for machine learning - linear algebra, calculus, probability, and applied methods.", color: "#ffc47c", + topics: PREPA_ML_TOPICS, }, ] as const; +/* ------------------------------------------------------------------ */ +/* Component */ +/* ------------------------------------------------------------------ */ + export function MathLandingView() { useDocumentTitle("Math"); const navigate = useNavigate(); @@ -35,30 +64,85 @@ export function MathLandingView() { title="Math" description="Your math study hub - pick a curriculum track to review, or test your readiness with the bridge program." /> -
- {ENTRIES.map((entry, i) => ( - + {TRACKS.map((track, ti) => ( + navigate({ to: entry.to })} - className="text-left rounded-[12px] border border-white/[0.08] bg-white/[0.02] p-6 transition-colors duration-150 hover:border-white/[0.12] hover:bg-white/[0.035]" - style={{ borderLeftWidth: 2, borderLeftColor: entry.color }} + transition={{ delay: ti * 0.06, duration: 0.3 }} + className="rounded-[12px] border border-white/[0.08] bg-white/[0.02] p-5" + style={{ borderLeftWidth: 2, borderLeftColor: track.color }} > -

+ navigate({ + to: "/math/refresh", + search: { track: track.key, tab: "methode" }, + }) + } + className="flex w-full items-center justify-between text-left group" > - {entry.label} -

-

- {entry.description} -

-
+
+

+ {track.title} +

+

+ {track.description} +

+
+ + + +
+ {track.topics.map((topic, i) => ( + + navigate({ + to: "/math/refresh", + search: { track: track.key, tab: topic.tab }, + }) + } + className="rounded-[4px] border border-white/[0.08] bg-white/[0.04] px-3 py-1.5 text-[15px] text-[#d0d6e0] transition-colors duration-150 hover:border-white/[0.15] hover:bg-white/[0.07] hover:text-[#f7f8f8]" + > + {topic.label} + + ))} +
+ ))}
+ + {/* Bridge program */} + navigate({ to: "/math/bridge" })} + className="mt-6 w-full text-left rounded-[12px] border border-white/[0.08] bg-white/[0.02] p-6 transition-colors duration-150 hover:border-white/[0.12] hover:bg-white/[0.035]" + style={{ borderLeftWidth: 2, borderLeftColor: "#5e6ad2" }} + > +

+ Math Bridge Program +

+

+ Test your skills across four levels, track your readiness, and close + gaps with interactive micro-checks. +

+
); } From 1f34ca09578a34c66f0b1d1b4e92e5e5ee21dfdb Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Mon, 16 Mar 2026 21:13:27 +0100 Subject: [PATCH 04/10] feat(curriculum): unified track system with resource tracks and sidebar collapse Backend: - Migration 0084: track_resources table, track_type on tracks, color on modules - Extend curriculum models, loader, store, and API for resource tracks - Resource tracks skip concept validation, support curated link collections - New endpoints: GET track resources, GET module resources - Seed job supports --type filter and skips _ prefixed files Data: - 11 resource track YAML files extracted from hardcoded frontend views - 569 resources across 79 modules seeded into database - Tracks: AI Engineering, Frontend Eng, GPU for AI, Embodied AI, Bio-Augmentation, Databases, Applied Systems, Interview Prep, Freelance Strategy, Cognitive Toolkit, Behavioral Design - Displaced prep resources saved in _prep-displaced.yaml for merging Frontend: - Generic TrackDetailView handles both concept and resource tracks - Resource tracks render with tabbed modules and ResourceCards - CurriculumTracksView shows both track types with correct metadata - Sidebar collapsed: removed entire Reference section (~1,300 lines) - Removed Lenses, Levels, Cognitive Toolkit, Behavioral Design from nav - Added Dev Reference nav item linking to existing dev-ref page - Added inline tab navigators to DevRefView and DomainOntologyView - Frontend types extended with TrackResource, track_type, color fields --- backend/api/curriculum.py | 55 +- backend/jobs/seed_curriculum.py | 65 +- backend/models/curriculum.py | 43 +- .../services/infrastructure/db_migrations.py | 45 + .../services/learning/curriculum_loader.py | 76 +- backend/services/learning/curriculum_store.py | 62 +- curriculum/tracks/_prep-displaced.yaml | 395 +++++ curriculum/tracks/ai-engineering.yaml | 407 +++++ curriculum/tracks/applied-systems.yaml | 229 +++ 
curriculum/tracks/behavioral-design.yaml | 317 ++++ curriculum/tracks/bio-augmentation.yaml | 385 +++++ curriculum/tracks/cognitive-toolkit.yaml | 1038 +++++++++++++ curriculum/tracks/databases.yaml | 372 +++++ curriculum/tracks/embodied-ai.yaml | 425 ++++++ curriculum/tracks/freelance-strategy.yaml | 156 ++ curriculum/tracks/frontend-engineering.yaml | 357 +++++ curriculum/tracks/gpu-for-ai.yaml | 512 +++++++ curriculum/tracks/interview-prep.yaml | 138 ++ frontend/src/components/Sidebar.tsx | 1324 +---------------- frontend/src/lib/api/endpoints.ts | 19 + frontend/src/lib/api/types.ts | 18 + frontend/src/lib/query/keys.ts | 2 + frontend/src/routes/curriculum.tsx | 16 +- frontend/src/views/CurriculumTracksView.tsx | 24 +- frontend/src/views/DevRefView.tsx | 159 +- frontend/src/views/DomainOntologyView.tsx | 35 +- frontend/src/views/TrackDetailView.tsx | 454 ++++++ 27 files changed, 5747 insertions(+), 1381 deletions(-) create mode 100644 curriculum/tracks/_prep-displaced.yaml create mode 100644 curriculum/tracks/ai-engineering.yaml create mode 100644 curriculum/tracks/applied-systems.yaml create mode 100644 curriculum/tracks/behavioral-design.yaml create mode 100644 curriculum/tracks/bio-augmentation.yaml create mode 100644 curriculum/tracks/cognitive-toolkit.yaml create mode 100644 curriculum/tracks/databases.yaml create mode 100644 curriculum/tracks/embodied-ai.yaml create mode 100644 curriculum/tracks/freelance-strategy.yaml create mode 100644 curriculum/tracks/frontend-engineering.yaml create mode 100644 curriculum/tracks/gpu-for-ai.yaml create mode 100644 curriculum/tracks/interview-prep.yaml create mode 100644 frontend/src/views/TrackDetailView.tsx diff --git a/backend/api/curriculum.py b/backend/api/curriculum.py index 471a5b1..d609695 100644 --- a/backend/api/curriculum.py +++ b/backend/api/curriculum.py @@ -1,4 +1,4 @@ -"""Curriculum API - read-only track and module retrieval.""" +"""Curriculum API - track, module, and resource retrieval.""" import logging 
@@ -11,6 +11,7 @@ CurriculumModuleSummary, CurriculumTrackDetail, CurriculumTrackSummary, + TrackResource, ) from backend.services.learning.curriculum_store import ( DEFAULT_USER_ID, @@ -18,6 +19,8 @@ get_module, get_module_progress, get_track, + list_module_resources, + list_track_resources, list_tracks, update_concept_progress, ) @@ -54,6 +57,19 @@ class ModuleProgressResponse(BaseModel): not_started: int +class ModuleResourcesResponse(BaseModel): + track_id: str + module_id: str + resources: list[TrackResource] + total: int + + +class TrackResourcesResponse(BaseModel): + track_id: str + resources: list[TrackResource] + total: int + + @router.get("/tracks", response_model=TrackListResponse) def list_curriculum_tracks(published_only: bool = Query(True)): """List all curriculum tracks.""" @@ -81,12 +97,16 @@ def get_curriculum_track(track_id: str): @router.get("/tracks/{track_id}/modules/{module_id}", response_model=CurriculumModuleDetail) def get_curriculum_module(track_id: str, module_id: str): - """Get a module with ordered concept refs and readiness state.""" + """Get a module with ordered concept refs, readiness state, and resources.""" module = get_module(track_id, module_id) if not module: raise HTTPException(status_code=404, detail="Module not found") return CurriculumModuleDetail( - **{**module, "concepts": [ConceptRef(**c) for c in module["concepts"]]} + **{ + **module, + "concepts": [ConceptRef(**c) for c in module["concepts"]], + "resources": [TrackResource(**r) for r in module["resources"]], + } ) @@ -106,6 +126,35 @@ def get_curriculum_module_progress( return ModuleProgressResponse(**{k: v for k, v in progress.items() if k != "concepts"}) +@router.get( + "/tracks/{track_id}/modules/{module_id}/resources", + response_model=ModuleResourcesResponse, +) +def get_module_resources_endpoint(track_id: str, module_id: str): + """List all resources for a specific module.""" + resources = list_module_resources(track_id, module_id) + return 
ModuleResourcesResponse( + track_id=track_id, + module_id=module_id, + resources=[TrackResource(**r) for r in resources], + total=len(resources), + ) + + +@router.get( + "/tracks/{track_id}/resources", + response_model=TrackResourcesResponse, +) +def get_track_resources_endpoint(track_id: str): + """List all resources for a track.""" + resources = list_track_resources(track_id) + return TrackResourcesResponse( + track_id=track_id, + resources=[TrackResource(**r) for r in resources], + total=len(resources), + ) + + @router.put("/progress/{concept_id}", response_model=ConceptProgressResponse) def update_progress(concept_id: str, body: ProgressUpdateRequest): """Update learning progress for a concept.""" diff --git a/backend/jobs/seed_curriculum.py b/backend/jobs/seed_curriculum.py index 8b8e34b..73879fa 100644 --- a/backend/jobs/seed_curriculum.py +++ b/backend/jobs/seed_curriculum.py @@ -3,9 +3,14 @@ Loads all YAML files from curriculum/tracks/, validates each against the canonical concept registry and dossier layer, and upserts into DB. +Supports two track types: + - concept: validated against canonical concepts (existing behavior) + - resource: curated link/reference collections (no concept validation) + Usage: python -m backend.jobs.seed_curriculum python -m backend.jobs.seed_curriculum --dry-run + python -m backend.jobs.seed_curriculum --type resource """ import argparse @@ -16,7 +21,7 @@ logger = logging.getLogger(__name__) -def seed(dry_run: bool = False) -> bool: +def seed(dry_run: bool = False, track_type_filter: str | None = None) -> bool: """Load and seed all curriculum track files. 
Returns True if all succeeded.""" from backend.services.chat.history import init_db @@ -34,7 +39,11 @@ def seed(dry_run: bool = False) -> bool: logger.warning("No curriculum track files found in curriculum/tracks/") return False + # Skip temporary/reference files (prefixed with _) + track_files = [p for p in track_files if not p.name.startswith("_")] + all_valid = True + seeded_count = 0 for path in track_files: logger.info("Loading %s", path.name) @@ -45,6 +54,11 @@ def seed(dry_run: bool = False) -> bool: all_valid = False continue + # Filter by track type if specified + if track_type_filter and track.track_type != track_type_filter: + logger.info("Skipping %s (type=%s, filter=%s)", track.id, track.track_type, track_type_filter) + continue + result = validate_track(track) # Print errors @@ -63,39 +77,50 @@ def seed(dry_run: bool = False) -> bool: if not result.valid: continue - # Readiness summary + # Summary total_concepts = sum(len(m.concepts) for m in track.modules) - print(f"\nValidated track '{track.id}'") + total_resources = sum(len(m.resources) for m in track.modules) + print(f"\nValidated track '{track.id}' (type={track.track_type})") print(f" Modules: {len(track.modules)}") - print(f" Concept links: {total_concepts}") - print(f" Warnings: {len(result.warnings)}") - - # Per-module readiness breakdown - from backend.services.learning.curriculum_loader import _get_conn - from backend.services.learning.curriculum_store import derive_readiness - - conn = _get_conn() - for module in track.modules: - counts: dict[str, int] = {"rich": 0, "grounded": 0, "scaffolded": 0} - for c in module.concepts: - r = derive_readiness(conn, c.concept_id) - counts[r] = counts.get(r, 0) + 1 - parts = [f"{v} {k}" for k, v in counts.items() if v > 0] - print(f" [{module.sort_order}] {module.title}: {len(module.concepts)} concepts ({', '.join(parts)})") + + if track.track_type == "concept": + print(f" Concept links: {total_concepts}") + + # Per-module readiness breakdown + from 
backend.services.learning.curriculum_loader import _get_conn + from backend.services.learning.curriculum_store import derive_readiness + + conn = _get_conn() + for module in track.modules: + counts: dict[str, int] = {"rich": 0, "grounded": 0, "scaffolded": 0} + for c in module.concepts: + r = derive_readiness(conn, c.concept_id) + counts[r] = counts.get(r, 0) + 1 + parts = [f"{v} {k}" for k, v in counts.items() if v > 0] + print(f" [{module.sort_order}] {module.title}: {len(module.concepts)} concepts ({', '.join(parts)})") + else: + print(f" Resources: {total_resources}") + for module in track.modules: + print(f" [{module.sort_order}] {module.title}: {len(module.resources)} resources") if dry_run: print("\n (dry run - not seeding)") else: + from backend.services.learning.curriculum_loader import _get_conn + conn = _get_conn() seed_track(track, conn) - print("\nSeeded curriculum successfully.") + seeded_count += 1 + print(" Seeded.") + print(f"\nDone. Seeded {seeded_count} tracks.") return all_valid if __name__ == "__main__": parser = argparse.ArgumentParser(description="Seed curriculum tracks from YAML files") parser.add_argument("--dry-run", action="store_true", help="Validate only, do not write to DB") + parser.add_argument("--type", choices=["concept", "resource"], default=None, help="Only seed tracks of this type") args = parser.parse_args() - success = seed(dry_run=args.dry_run) + success = seed(dry_run=args.dry_run, track_type_filter=args.type) sys.exit(0 if success else 1) diff --git a/backend/models/curriculum.py b/backend/models/curriculum.py index aa5e9d4..dc0d60b 100644 --- a/backend/models/curriculum.py +++ b/backend/models/curriculum.py @@ -10,13 +10,27 @@ class CurriculumModuleConceptFile(BaseModel): sort_order: int +class ResourceFile(BaseModel): + """A curated resource within a module (link, reference, or knowledge item).""" + + name: str + url: str | None = None + description: str = "" + detail: str | None = None + resource_type: str = "link" + 
sort_order: int = 0 + metadata_json: str | None = None + + class CurriculumModuleFile(BaseModel): id: str title: str objective: str - estimated_time_minutes: int + estimated_time_minutes: int = 30 sort_order: int - concepts: list[CurriculumModuleConceptFile] + color: str | None = None + concepts: list[CurriculumModuleConceptFile] = [] + resources: list[ResourceFile] = [] class CurriculumTrackFile(BaseModel): @@ -24,6 +38,7 @@ class CurriculumTrackFile(BaseModel): title: str description: str difficulty: str + track_type: str = "concept" is_published: bool = True modules: list[CurriculumModuleFile] @@ -31,6 +46,19 @@ class CurriculumTrackFile(BaseModel): # --- Runtime response models --- +class TrackResource(BaseModel): + """A curated resource attached to a track module.""" + + id: int + name: str + url: str | None = None + description: str = "" + detail: str | None = None + resource_type: str = "link" + sort_order: int = 0 + metadata_json: str | None = None + + class ConceptRef(BaseModel): """A concept referenced by a curriculum module, joined with concept metadata.""" @@ -52,7 +80,9 @@ class CurriculumModuleDetail(BaseModel): objective: str estimated_time_minutes: int sort_order: int - concepts: list[ConceptRef] + color: str | None = None + concepts: list[ConceptRef] = [] + resources: list[TrackResource] = [] class CurriculumModuleSummary(BaseModel): @@ -61,7 +91,9 @@ class CurriculumModuleSummary(BaseModel): objective: str estimated_time_minutes: int sort_order: int - concept_count: int + color: str | None = None + concept_count: int = 0 + resource_count: int = 0 class CurriculumTrackDetail(BaseModel): @@ -69,6 +101,7 @@ class CurriculumTrackDetail(BaseModel): title: str description: str difficulty: str + track_type: str = "concept" is_published: bool modules: list[CurriculumModuleSummary] @@ -78,9 +111,11 @@ class CurriculumTrackSummary(BaseModel): title: str description: str difficulty: str + track_type: str = "concept" is_published: bool module_count: int 
concept_count: int + resource_count: int = 0 # --- Validation results --- diff --git a/backend/services/infrastructure/db_migrations.py b/backend/services/infrastructure/db_migrations.py index d60ca2f..4af98d8 100644 --- a/backend/services/infrastructure/db_migrations.py +++ b/backend/services/infrastructure/db_migrations.py @@ -2961,6 +2961,50 @@ def _migration_083_concepts_lens_level(conn: sqlite3.Connection) -> None: ) +def _migration_084_track_resources(conn: sqlite3.Connection) -> None: + """Add track_type to tracks, color to modules, and track_resources table. + + Supports resource-oriented tracks (curated link collections) alongside + concept-oriented tracks (the existing curriculum model). + """ + # track_type on curriculum_tracks (concept vs resource) + try: + conn.execute( + "ALTER TABLE curriculum_tracks ADD COLUMN track_type TEXT NOT NULL DEFAULT 'concept'" + ) + except sqlite3.OperationalError: + pass # Column already exists + + # color on curriculum_modules (for tab coloring in resource tracks) + try: + conn.execute("ALTER TABLE curriculum_modules ADD COLUMN color TEXT") + except sqlite3.OperationalError: + pass + + # track_resources table + conn.executescript(""" + CREATE TABLE IF NOT EXISTS track_resources ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + track_id TEXT NOT NULL, + module_id TEXT NOT NULL, + name TEXT NOT NULL, + url TEXT, + description TEXT NOT NULL DEFAULT '', + detail TEXT, + resource_type TEXT NOT NULL DEFAULT 'link' + CHECK(resource_type IN ('link', 'reference', 'knowledge')), + sort_order INTEGER NOT NULL DEFAULT 0, + metadata_json TEXT, + created_at TEXT NOT NULL DEFAULT (datetime('now')), + FOREIGN KEY (track_id) REFERENCES curriculum_tracks(id) ON DELETE CASCADE, + FOREIGN KEY (module_id, track_id) REFERENCES curriculum_modules(id, track_id) ON DELETE CASCADE + ); + + CREATE INDEX IF NOT EXISTS idx_track_resources_module + ON track_resources(track_id, module_id); + """) + + MIGRATIONS: tuple[MigrationStep, ...] 
= ( MigrationStep("0001", "chat_core_tables", _migration_001_chat_core), MigrationStep("0002", "messages_max_rerank_score", _migration_002_messages_max_rerank_score), @@ -3045,6 +3089,7 @@ def _migration_083_concepts_lens_level(conn: sqlite3.Connection) -> None: MigrationStep("0081", "curriculum_tables", _migration_081_curriculum_tables), MigrationStep("0082", "concept_progress_per_user", _migration_082_concept_progress_per_user), MigrationStep("0083", "concepts_lens_level", _migration_083_concepts_lens_level), + MigrationStep("0084", "track_resources", _migration_084_track_resources), ) diff --git a/backend/services/learning/curriculum_loader.py b/backend/services/learning/curriculum_loader.py index 12caf90..86b4630 100644 --- a/backend/services/learning/curriculum_loader.py +++ b/backend/services/learning/curriculum_loader.py @@ -2,9 +2,12 @@ Loads YAML track files from curriculum/tracks/, validates against the canonical concept registry and dossier layer, and upserts into -curriculum_tracks / curriculum_modules / curriculum_module_concepts tables. +curriculum_tracks / curriculum_modules / curriculum_module_concepts / +track_resources tables. -Curriculum contains NO knowledge content. It is a thin ordering layer. +Two track types: + - concept: thin ordering layer over canonical concepts (validated against DB) + - resource: curated link/reference collections (no concept validation) """ import difflib @@ -45,6 +48,7 @@ def validate_track( ) -> ValidationResult: """Validate a curriculum track against the canonical concept registry. + For resource tracks (track_type='resource'), concept validation is skipped. Returns a ValidationResult with blocking errors and non-blocking warnings. 
""" if conn is None: @@ -65,12 +69,6 @@ def validate_track( # Module-level checks seen_module_ids: set[str] = set() seen_module_sort_orders: set[int] = set() - all_concept_ids_in_track: dict[str, list[str]] = {} # concept_id -> [module_ids] - - # Pre-fetch all canonical concept IDs for fuzzy matching - all_canonical_ids = [ - r["id"] for r in conn.execute("SELECT id FROM concepts").fetchall() - ] for module in track.modules: # Duplicate module id @@ -94,6 +92,27 @@ def validate_track( message=f"Module '{module.id}' title is missing", field=f"modules[{module.id}].title", )) + + # Resource tracks skip concept validation entirely + if track.track_type == "resource": + # Validate resources instead + for module in track.modules: + if not module.resources and not module.concepts: + warnings.append(ValidationWarning( + message=f"Module '{module.id}' has no resources or concepts", + field=f"modules[{module.id}]", + )) + return ValidationResult(valid=len(errors) == 0, errors=errors, warnings=warnings) + + # --- Concept track validation (existing logic) --- + all_concept_ids_in_track: dict[str, list[str]] = {} + + # Pre-fetch all canonical concept IDs for fuzzy matching + all_canonical_ids = [ + r["id"] for r in conn.execute("SELECT id FROM concepts").fetchall() + ] + + for module in track.modules: if not module.objective: errors.append(ValidationError( message=f"Module '{module.id}' objective is missing", @@ -199,25 +218,31 @@ def seed_track(track: CurriculumTrackFile, conn: sqlite3.Connection | None = Non if conn is None: conn = _get_conn() - # Delete existing track data (CASCADE handles modules + concepts) + # Ensure FK cascade works + conn.execute("PRAGMA foreign_keys = ON") + + # Delete existing track data (CASCADE handles modules + concepts + resources) conn.execute("DELETE FROM curriculum_tracks WHERE id = ?", (track.id,)) # Insert track conn.execute( - """INSERT INTO curriculum_tracks (id, title, description, difficulty, sort_order, is_published) - VALUES (?, ?, ?, 
?, ?, ?)""", - (track.id, track.title, track.description.strip(), track.difficulty, 0, int(track.is_published)), + """INSERT INTO curriculum_tracks (id, title, description, difficulty, track_type, sort_order, is_published) + VALUES (?, ?, ?, ?, ?, ?, ?)""", + (track.id, track.title, track.description.strip(), track.difficulty, + track.track_type, 0, int(track.is_published)), ) - # Insert modules and concept links + # Insert modules, concept links, and resources for module in track.modules: conn.execute( """INSERT INTO curriculum_modules - (id, track_id, title, objective, estimated_time_minutes, sort_order) - VALUES (?, ?, ?, ?, ?, ?)""", + (id, track_id, title, objective, estimated_time_minutes, sort_order, color) + VALUES (?, ?, ?, ?, ?, ?, ?)""", (module.id, track.id, module.title, module.objective.strip(), - module.estimated_time_minutes, module.sort_order), + module.estimated_time_minutes, module.sort_order, module.color), ) + + # Concept links (for concept tracks) for concept_ref in module.concepts: conn.execute( """INSERT INTO curriculum_module_concepts @@ -226,8 +251,25 @@ def seed_track(track: CurriculumTrackFile, conn: sqlite3.Connection | None = Non (module.id, track.id, concept_ref.concept_id, concept_ref.sort_order), ) + # Resources (for resource tracks) + for resource in module.resources: + conn.execute( + """INSERT INTO track_resources + (track_id, module_id, name, url, description, detail, + resource_type, sort_order, metadata_json) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""", + (track.id, module.id, resource.name, resource.url, + resource.description, resource.detail, resource.resource_type, + resource.sort_order, resource.metadata_json), + ) + conn.commit() - logger.info("Seeded track '%s' with %d modules", track.id, len(track.modules)) + resource_count = sum(len(m.resources) for m in track.modules) + concept_count = sum(len(m.concepts) for m in track.modules) + logger.info( + "Seeded track '%s' (%s) with %d modules, %d concepts, %d resources", + 
track.id, track.track_type, len(track.modules), concept_count, resource_count, + ) def load_all_tracks() -> list[Path]: diff --git a/backend/services/learning/curriculum_store.py b/backend/services/learning/curriculum_store.py index 52b0082..44c137f 100644 --- a/backend/services/learning/curriculum_store.py +++ b/backend/services/learning/curriculum_store.py @@ -82,16 +82,19 @@ def _concept_readiness(conn: sqlite3.Connection, concept_id: str) -> tuple[str, def list_tracks(published_only: bool = True) -> list[dict]: - """List all curriculum tracks with module/concept counts.""" + """List all curriculum tracks with module/concept/resource counts.""" conn = _get_conn() where = "WHERE t.is_published = 1" if published_only else "" rows = conn.execute(f""" - SELECT t.id, t.title, t.description, t.difficulty, t.is_published, + SELECT t.id, t.title, t.description, t.difficulty, t.track_type, + t.is_published, COUNT(DISTINCT m.id) as module_count, - COUNT(DISTINCT mc.concept_id) as concept_count + COUNT(DISTINCT mc.concept_id) as concept_count, + COUNT(DISTINCT tr.id) as resource_count FROM curriculum_tracks t LEFT JOIN curriculum_modules m ON m.track_id = t.id LEFT JOIN curriculum_module_concepts mc ON mc.track_id = t.id AND mc.module_id = m.id + LEFT JOIN track_resources tr ON tr.track_id = t.id AND tr.module_id = m.id {where} GROUP BY t.id ORDER BY t.sort_order, t.title @@ -100,7 +103,7 @@ def list_tracks(published_only: bool = True) -> list[dict]: def get_track(track_id: str) -> dict | None: - """Get a track with its modules (summary, no concept details).""" + """Get a track with its modules (summary, no concept/resource details).""" conn = _get_conn() track_row = conn.execute( "SELECT * FROM curriculum_tracks WHERE id = ?", (track_id,) @@ -109,10 +112,13 @@ def get_track(track_id: str) -> dict | None: return None module_rows = conn.execute( - """SELECT m.id, m.title, m.objective, m.estimated_time_minutes, m.sort_order, - COUNT(mc.concept_id) as concept_count + 
"""SELECT m.id, m.title, m.objective, m.estimated_time_minutes, + m.sort_order, m.color, + COUNT(DISTINCT mc.concept_id) as concept_count, + COUNT(DISTINCT tr.id) as resource_count FROM curriculum_modules m LEFT JOIN curriculum_module_concepts mc ON mc.module_id = m.id AND mc.track_id = m.track_id + LEFT JOIN track_resources tr ON tr.module_id = m.id AND tr.track_id = m.track_id WHERE m.track_id = ? GROUP BY m.id ORDER BY m.sort_order""", @@ -124,6 +130,7 @@ def get_track(track_id: str) -> dict | None: "title": track_row["title"], "description": track_row["description"], "difficulty": track_row["difficulty"], + "track_type": track_row["track_type"], "is_published": bool(track_row["is_published"]), "modules": [dict(r) for r in module_rows], } @@ -158,6 +165,16 @@ def get_module(track_id: str, module_id: str) -> dict | None: "is_required": bool(cr["is_required"]), }) + # Resources for this module + resource_rows = conn.execute( + """SELECT id, name, url, description, detail, resource_type, + sort_order, metadata_json + FROM track_resources + WHERE track_id = ? AND module_id = ? + ORDER BY sort_order""", + (track_id, module_id), + ).fetchall() + return { "id": module_row["id"], "track_id": module_row["track_id"], @@ -165,10 +182,43 @@ def get_module(track_id: str, module_id: str) -> dict | None: "objective": module_row["objective"], "estimated_time_minutes": module_row["estimated_time_minutes"], "sort_order": module_row["sort_order"], + "color": module_row["color"], "concepts": concepts, + "resources": [dict(r) for r in resource_rows], } +def list_module_resources(track_id: str, module_id: str) -> list[dict]: + """List all resources for a specific module.""" + conn = _get_conn() + rows = conn.execute( + """SELECT id, name, url, description, detail, resource_type, + sort_order, metadata_json + FROM track_resources + WHERE track_id = ? AND module_id = ? 
+ ORDER BY sort_order""", + (track_id, module_id), + ).fetchall() + return [dict(r) for r in rows] + + +def list_track_resources(track_id: str) -> list[dict]: + """List all resources for a track, grouped by module.""" + conn = _get_conn() + rows = conn.execute( + """SELECT tr.id, tr.module_id, tr.name, tr.url, tr.description, + tr.detail, tr.resource_type, tr.sort_order, tr.metadata_json, + m.title as module_title, m.sort_order as module_sort_order, + m.color as module_color + FROM track_resources tr + JOIN curriculum_modules m ON m.id = tr.module_id AND m.track_id = tr.track_id + WHERE tr.track_id = ? + ORDER BY m.sort_order, tr.sort_order""", + (track_id,), + ).fetchall() + return [dict(r) for r in rows] + + def update_concept_progress( concept_id: str, status: str, diff --git a/curriculum/tracks/_prep-displaced.yaml b/curriculum/tracks/_prep-displaced.yaml new file mode 100644 index 0000000..ba40dbe --- /dev/null +++ b/curriculum/tracks/_prep-displaced.yaml @@ -0,0 +1,395 @@ +# Displaced resources from Interview Prep (PrepView.tsx) +# These tabs are moving to other tracks. Copy-paste into the target track files. +# Generated 2026-03-16. + +# ============================================================================ +# sql → Dev Reference +# ============================================================================ +- id: sql + title: SQL Prep + objective: > + Get fast at SQL - pattern drills, timed exercises, and interview-style practice. + color: "#ffc47c" + target_track: dev-reference + resources: + - name: StrataScratch + url: https://www.stratascratch.com + description: Company-tagged SQL interview questions and data projects. + detail: Use as your primary timed SQL practice bank. + sort_order: 1 + - name: HelloInterview + url: https://www.hellointerview.com + description: Interview prep plans and guided data interview workflows. + detail: Use for framing, debriefing, and role-specific drill plans. 
+ sort_order: 2 + - name: DesignGurus + url: https://www.designgurus.io + description: SQL patterns, database concepts, and interview strategy courses. + detail: Use for conceptual refresh before timed SQL sessions. + sort_order: 3 + - name: Interviewing.io + url: https://interviewing.io + description: Live interviewer feedback on SQL and analytics communication. + detail: Use to simulate back-and-forth SQL rounds. + sort_order: 4 + +# ============================================================================ +# docker → Dev Reference +# ============================================================================ +- id: docker + title: Docker + objective: > + Know Docker well enough to answer any interview question on it. + color: "#5e6ad2" + target_track: dev-reference + resources: + - name: Docker Official Docs + url: https://docs.docker.com/get-started/ + description: Authoritative reference for Docker concepts, CLI, and Compose. + detail: Use as the primary lookup for commands, Dockerfile syntax, and networking. + sort_order: 1 + - name: Docker Deep Dive (Nigel Poulton) + url: https://www.amazon.com/Docker-Deep-Dive-Nigel-Poulton/dp/1916585256 + description: Concept-to-production Docker book - images, containers, networking, volumes, security. + detail: Use for structured learning and interview scenario prep. + sort_order: 2 + - name: Dockerfile Best Practices + url: https://docs.docker.com/build/building/best-practices/ + description: Official guide to writing efficient, secure, and cacheable Dockerfiles. + detail: Use to drill multi-stage builds, layer ordering, and security hardening. + sort_order: 3 + - name: Play with Docker + url: https://labs.play-with-docker.com + description: Browser-based Docker playground for hands-on practice. + detail: Use for quick experimentation without local setup. 
+ sort_order: 4 + +# ============================================================================ +# kubernetes → Dev Reference +# ============================================================================ +- id: kubernetes + title: Kubernetes + objective: > + Understand Kubernetes deeply enough to design and troubleshoot in an interview. + color: "#f472b6" + target_track: dev-reference + resources: + - name: Kubernetes Official Docs + url: https://kubernetes.io/docs/home/ + description: Authoritative reference for all K8s concepts, APIs, and operations. + detail: Use as the primary lookup - concepts, tasks, API reference. + sort_order: 1 + - name: Kubernetes Up & Running (Hightower, Burns, Beda) + url: https://www.amazon.com/Kubernetes-Running-Dive-Future-Infrastructure/dp/109811020X + description: The definitive K8s book - architecture, workloads, services, deployments. + detail: Use for structured concept building before interview prep. + sort_order: 2 + - name: KillerCoda / Killershell + url: https://killercoda.com + description: Interactive K8s scenarios and CKA/CKAD exam simulators. + detail: Use for hands-on cluster practice and timed kubectl drills. + sort_order: 3 + - name: Learnk8s + url: https://learnk8s.io + description: Visual guides, architecture deep dives, and production patterns. + detail: Use for the visual explainers on networking, scheduling, and scaling. + sort_order: 4 + - name: CKAD Exercises (dgkanatsios) + url: https://github.com/dgkanatsios/CKAD-exercises + description: Curated kubectl exercises mapped to CKAD exam domains. + detail: Use for daily CLI drill reps - pods, services, configmaps, RBAC. + sort_order: 5 + +# ============================================================================ +# nlp → AI Engineering +# ============================================================================ +- id: nlp + title: NLP + objective: > + Prepare for ML/NLP interviews - transformers, attention, and the topics that come up most. 
+ color: "#eb5757" + target_track: ai-engineering + resources: + - name: Speech and Language Processing (Jurafsky & Martin) + url: https://web.stanford.edu/~jurafsky/slp3/ + description: Comprehensive NLP textbook - free online draft covering classical and neural methods. + detail: Use as the theory backbone - tokenization, parsing, semantics, transformers. + sort_order: 1 + - name: Hugging Face NLP Course + url: https://huggingface.co/learn/nlp-course + description: Hands-on transformer pipelines, fine-tuning, and tokenizer internals. + detail: Use for practical coding reps with real models and datasets. + sort_order: 2 + - name: The Illustrated Transformer (Jay Alammar) + url: https://jalammar.github.io/illustrated-transformer/ + description: Visual, intuitive explanation of attention and transformer architecture. + detail: Use before interviews to refresh self-attention, multi-head, positional encoding. + sort_order: 3 + - name: Papers With Code - NLP + url: https://paperswithcode.com/area/natural-language-processing + description: State-of-the-art benchmarks and papers across all NLP tasks. + detail: Use to stay current on which methods top leaderboards and why. + sort_order: 4 + - name: Stanford CS224N + url: https://web.stanford.edu/class/cs224n/ + description: Deep learning for NLP - lecture videos, assignments, and slides. + detail: Use for structured learning - word vectors, RNNs, attention, pretraining. + sort_order: 5 + +# ============================================================================ +# physical_ai → Embodied AI +# ============================================================================ +- id: physical_ai + title: Physical AI + objective: > + Prep for the next wave of AI roles - world models, sim-to-real, 3D vision, and physical AI. 
+ color: "#ffc47c" + target_track: embodied-ai + resources: + - name: NVIDIA Physical AI + url: https://developer.nvidia.com/physical-ai + description: NVIDIA's physical AI platform - Isaac Sim, Cosmos, and the full sim-to-real stack. + detail: Study the end-to-end pipeline from simulation to real-world robot deployment. + sort_order: 1 + - name: World Labs + url: https://www.worldlabs.ai + description: Fei-Fei Li's spatial intelligence company building large world models. + detail: Track the spatial AI thesis and job openings in this space. + sort_order: 2 + - name: Figure AI Careers + url: https://www.figure.ai/careers + description: Humanoid robotics company hiring for world model, perception, and control roles. + detail: Study their job descriptions to understand the skill requirements for physical AI roles. + sort_order: 3 + - name: Physical Intelligence (pi) + url: https://www.physicalintelligence.company + description: General-purpose robot foundation models. Raised $400M+. + detail: Track the company building the GPT moment for robotics. + sort_order: 4 + - name: 1X Technologies + url: https://www.1x.tech + description: NEO humanoid built with learned neural network policies. + detail: Study the consumer humanoid thesis and their ML-first approach. + sort_order: 5 + +# ============================================================================ +# distributed_systems → Applied Systems +# ============================================================================ +- id: distributed_systems + title: Distributed Systems + objective: > + Master distributed systems fundamentals - CAP, consensus, and fault tolerance for interviews. + color: "#5bb86e" + target_track: applied-systems + resources: + - name: Designing Data-Intensive Applications + url: https://dataintensive.net + description: The bible of distributed systems - replication, partitioning, consistency, and consensus. 
+ detail: Read chapters 5-9 thoroughly - this is the most tested material in system design interviews. + sort_order: 1 + - name: MIT 6.824 (Distributed Systems) + url: https://pdos.csail.mit.edu/6.824/ + description: Graduate-level course with labs on MapReduce, Raft, and fault-tolerant key-value stores. + detail: Work through the Raft lab at minimum - it's the gold standard for understanding consensus. + sort_order: 2 + - name: Jepsen + url: https://jepsen.io + description: Kyle Kingsbury's distributed systems correctness testing - real failure analysis of production databases. + detail: Read 2-3 analyses (e.g., Redis, MongoDB, CockroachDB) to see how real systems fail. + sort_order: 3 + - name: Fly.io Distributed Systems Challenges (Gossip Glomers) + url: https://fly.io/dist-sys/ + description: Hands-on challenges - broadcast, grow-only counters, Kafka-style logs, and total-order broadcast. + detail: Practice implementing distributed protocols. Work through all 6 challenges. + sort_order: 4 + - name: Martin Kleppmann's Blog + url: https://martin.kleppmann.com + description: Deep dives on CRDTs, distributed transactions, and consistency models from the DDIA author. + detail: Follow for precise technical writing on distributed systems edge cases. + sort_order: 5 + +# ============================================================================ +# deep_learning → ML Foundations (skip for now) +# ============================================================================ +- id: deep_learning + title: Deep Learning + objective: > + Know your architectures cold - CNNs, RNNs, transformers, attention, and training dynamics. + color: "#eb5757" + target_track: ml-foundations + resources: + - name: d2l.ai + url: https://d2l.ai + description: Interactive deep learning textbook with PyTorch, TensorFlow, and JAX. + detail: Best free textbook. Work through attention and transformer chapters. 
+ sort_order: 1 + - name: Fast.ai + url: https://course.fast.ai + description: Practical deep learning for coders - top-down learning approach. + detail: Great for building intuition before diving into theory. + sort_order: 2 + - name: 3Blue1Brown Neural Networks + url: https://www.3blue1brown.com/topics/neural-networks + description: Visual explanations of backpropagation, gradient descent, and transformers. + detail: Watch before interviews to refresh geometric intuition. + sort_order: 3 + - name: The Illustrated Transformer + url: https://jalammar.github.io/illustrated-transformer/ + description: Step-by-step visual walkthrough of the transformer architecture. + detail: Go-to reference for explaining attention in interviews. + sort_order: 4 + +# ============================================================================ +# machine_learning → ML Foundations (skip for now) +# ============================================================================ +- id: machine_learning + title: Machine Learning + objective: > + Nail the fundamentals - bias-variance, regularization, feature engineering, and model selection. + color: "#5e6ad2" + target_track: ml-foundations + resources: + - name: StatQuest + url: https://statquest.org + description: Visual, intuitive explanations of ML algorithms and statistics. + detail: Watch before interviews to refresh fundamentals. + sort_order: 1 + - name: Scikit-Learn User Guide + url: https://scikit-learn.org/stable/user_guide.html + description: Practical ML algorithms with clear mathematical foundations. + detail: Reference for classical ML algorithm details and trade-offs. + sort_order: 2 + - name: ML Interview Prep (Chip Huyen) + url: https://huyenchip.com/ml-interviews-book/ + description: Comprehensive ML interview question bank from a Stanford instructor. + detail: Work through one chapter per week during active prep. 
+ sort_order: 3 + - name: Made With ML + url: https://madewithml.com + description: End-to-end ML systems with production-grade patterns. + detail: Great for system design angle of ML interviews. + sort_order: 4 + +# ============================================================================ +# data_engineering → Applied Systems +# ============================================================================ +- id: data_engineering + title: Data Engineering + objective: > + Design pipelines that don't break - ETL patterns, data quality, medallion architecture, and orchestration. + color: "#f472b6" + target_track: applied-systems + resources: + - name: Fundamentals of Data Engineering (Reis & Housley) + url: https://www.oreilly.com/library/view/fundamentals-of-data/9781098108298/ + description: The canonical data engineering textbook - lifecycle, architecture, and trade-offs. + detail: Read chapters on storage, ingestion, and orchestration before interviews. + sort_order: 1 + - name: DataEngineer.io + url: https://dataengineer.io + description: Curated data engineering learning paths and interview prep. + detail: Use for structured prep when targeting DE roles. + sort_order: 2 + - name: Start Data Engineering + url: https://www.startdataengineering.com + description: Hands-on DE projects with real architectures. + detail: Build portfolio projects that demonstrate pipeline design. + sort_order: 3 + - name: Seattle Data Guy + url: https://www.youtube.com/@SeattleDataGuy + description: Data engineering career advice and system design walkthroughs. + detail: Great for understanding what companies actually look for. + sort_order: 4 + +# ============================================================================ +# mlops → Applied Systems +# ============================================================================ +- id: mlops + title: MLOps / LLMOps + objective: > + Ship models to production - experiment tracking, CI/CD for ML, monitoring, and cost optimization. 
+ color: "#4ade80" + target_track: applied-systems + resources: + - name: MLOps Community + url: https://mlops.community + description: Community-driven MLOps knowledge base, podcasts, and meetups. + detail: Stay current on production ML patterns and tooling. + sort_order: 1 + - name: Full Stack Deep Learning + url: https://fullstackdeeplearning.com + description: End-to-end ML project lifecycle from data to deployment. + detail: Best course for ML systems design interview prep. + sort_order: 2 + - name: Evidently AI Blog + url: https://www.evidentlyai.com/blog + description: ML monitoring, data drift detection, and model quality tracking. + detail: Reference for monitoring and observability patterns. + sort_order: 3 + - name: Chip Huyen - Designing ML Systems + url: https://www.oreilly.com/library/view/designing-machine-learning/9781098107956/ + description: Production ML systems - data pipelines, deployment, monitoring. + detail: The canonical ML systems design book for interviews. + sort_order: 4 + +# ============================================================================ +# rag → AI Engineering +# ============================================================================ +- id: rag + title: RAG + objective: > + Build retrieval systems that actually work - chunking, embeddings, hybrid search, and reranking. + color: "#55cdff" + target_track: ai-engineering + resources: + - name: Anthropic Contextual Retrieval + url: https://www.anthropic.com/news/contextual-retrieval + description: Anthropic's technique for chunk-level context enrichment in RAG. + detail: Core reference for modern RAG architecture patterns. + sort_order: 1 + - name: LlamaIndex + url: https://docs.llamaindex.ai + description: RAG framework with indexing, retrieval, and response synthesis. + detail: Study the architecture for understanding RAG pipeline components. 
+ sort_order: 2 + - name: Pinecone Learning Center + url: https://www.pinecone.io/learn/ + description: Vector search fundamentals, embedding models, and RAG patterns. + detail: Good visual explanations of similarity search and indexing. + sort_order: 3 + - name: RAGAS + url: https://docs.ragas.io + description: RAG evaluation framework - faithfulness, relevance, and context metrics. + detail: Essential for discussing how to evaluate RAG quality in interviews. + sort_order: 4 + +# ============================================================================ +# event_driven → Applied Systems +# ============================================================================ +- id: event_driven + title: Event-Driven Architecture + objective: > + Design reactive systems - pub/sub, CQRS, CDC, event sourcing, and async communication patterns. + color: "#ffc47c" + target_track: applied-systems + resources: + - name: Designing Event-Driven Systems (Confluent) + url: https://www.confluent.io/designing-event-driven-systems/ + description: Free book on event-driven architecture with Kafka patterns. + detail: Read chapters on event sourcing and CQRS before system design interviews. + sort_order: 1 + - name: Martin Fowler - Event-Driven + url: https://martinfowler.com/articles/201701-event-driven.html + description: Foundational article distinguishing event notification, state transfer, and sourcing. + detail: Know the four patterns and when to use each. + sort_order: 2 + - name: Enterprise Integration Patterns + url: https://www.enterpriseintegrationpatterns.com + description: Canonical messaging patterns - routers, transformers, and channels. + detail: Reference for message routing and integration design questions. + sort_order: 3 + - name: Kafka - The Definitive Guide + url: https://www.confluent.io/resources/kafka-the-definitive-guide-v2/ + description: Deep dive into Kafka internals, partitioning, and consumer groups. 
+ detail: Essential if interviewing at companies using Kafka. + sort_order: 4 diff --git a/curriculum/tracks/ai-engineering.yaml b/curriculum/tracks/ai-engineering.yaml new file mode 100644 index 0000000..efd351a --- /dev/null +++ b/curriculum/tracks/ai-engineering.yaml @@ -0,0 +1,407 @@ +id: ai-engineering +title: AI Engineering +description: > + Build production AI systems - from inference to agents to evals. +difficulty: intermediate +track_type: resource + +modules: + - id: inference + title: Inference Infrastructure + objective: > + Learn how to serve models fast and cheap - from quantization to speculative decoding. + color: "#55cdff" + sort_order: 1 + resources: + - name: vLLM + url: https://github.com/vllm-project/vllm + description: > + High-throughput LLM serving with PagedAttention, continuous batching, and speculative decoding. + detail: > + The default choice for self-hosted inference. Study PagedAttention for memory-efficient KV-cache management. + sort_order: 1 + - name: NVIDIA TensorRT-LLM + url: https://github.com/NVIDIA/TensorRT-LLM + description: > + NVIDIA's optimized inference library with FP8 quantization, in-flight batching, and multi-GPU tensor parallelism. + detail: > + Best raw performance on NVIDIA hardware. Compare latency/throughput vs vLLM for production decisions. + sort_order: 2 + - name: SGLang + url: https://github.com/sgl-project/sglang + description: > + Fast structured generation with RadixAttention for prefix caching and constrained decoding. + detail: > + Study RadixAttention for KV-cache sharing across requests. Ideal for JSON/schema-constrained output. + sort_order: 3 + - name: llama.cpp + url: https://github.com/ggerganov/llama.cpp + description: > + CPU and mixed CPU/GPU inference with GGUF quantization formats. Runs LLMs on consumer hardware. + detail: > + Essential for edge/local deployment. Understand quantization levels (Q4_K_M, Q5_K_S) and their accuracy tradeoffs. 
+ sort_order: 4 + - name: Ollama + url: https://ollama.com + description: > + Developer-friendly local LLM runner wrapping llama.cpp with Docker-like model management. + detail: > + Fast prototyping tool. Use for local dev loops, then graduate to vLLM/TensorRT-LLM for production. + sort_order: 5 + - name: Hugging Face TGI + url: https://github.com/huggingface/text-generation-inference + description: > + Production-ready inference server with flash attention, quantization, and watermarking support. + detail: > + Good middle ground between ease-of-use and performance. Native HF model hub integration. + sort_order: 6 + + - id: agents + title: Agent Architecture + objective: > + Build agents that actually work - tool use, memory, planning, and multi-agent patterns. + color: "#ffc47c" + sort_order: 2 + resources: + - name: LangGraph + url: https://github.com/langchain-ai/langgraph + description: > + Graph-based agent orchestration with cycles, persistence, and human-in-the-loop patterns. + detail: > + The most mature agent framework. Study the state machine pattern for reliable multi-step workflows. + sort_order: 1 + - name: CrewAI + url: https://github.com/crewAIInc/crewAI + description: > + Multi-agent framework with role-based agents, task delegation, and sequential/parallel execution. + detail: > + Good for rapid prototyping of multi-agent systems. Compare the role-play pattern vs LangGraph's graph approach. + sort_order: 2 + - name: AutoGen (Microsoft) + url: https://github.com/microsoft/autogen + description: > + Multi-agent conversation framework with customizable agents, code execution, and group chat patterns. + detail: > + Study the conversational agent pattern and how it handles tool use, code generation, and verification. 
+ sort_order: 3 + - name: Anthropic Claude Tool Use + url: https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview + description: > + Native function calling with structured outputs, parallel tool use, and streaming tool results. + detail: > + Master the tool-use protocol: schema definition, forced tool calls, and error handling patterns. + sort_order: 4 + - name: OpenAI Agents SDK + url: https://github.com/openai/openai-agents-python + description: > + Official agent framework with handoffs, guardrails, tracing, and multi-agent orchestration. + detail: > + Study the handoff pattern for agent-to-agent delegation and the built-in tracing for debugging. + sort_order: 5 + - name: Pydantic AI + url: https://ai.pydantic.dev + description: > + Type-safe agent framework with structured outputs, dependency injection, and model-agnostic design. + detail: > + Best for Python-heavy teams wanting type safety. The structured output + validation pattern is production-grade. + sort_order: 6 + + - id: evals + title: Evals & Observability + objective: > + Know when your LLM system is broken before your users do. + color: "#5bb86e" + sort_order: 3 + resources: + - name: Braintrust + url: https://www.braintrust.dev + description: > + End-to-end LLM eval platform with scoring, experiments, datasets, and production logging. + detail: > + Study the eval-driven development loop: define metrics, run experiments, compare prompts/models systematically. + sort_order: 1 + - name: LangSmith + url: https://smith.langchain.com + description: > + Tracing, evaluation, and monitoring platform for LLM applications with dataset management. + detail: > + Use for tracing complex chains/agents. The trace visualization is invaluable for debugging multi-step flows. + sort_order: 2 + - name: Arize Phoenix + url: https://github.com/Arize-ai/phoenix + description: > + Open-source LLM observability with tracing, evals, embeddings analysis, and retrieval diagnostics. 
+ detail: > + Best open-source option. Study the embedding drift detection and retrieval quality metrics. + sort_order: 3 + - name: RAGAS + url: https://github.com/explodinggradients/ragas + description: > + RAG evaluation framework measuring faithfulness, answer relevancy, context precision, and recall. + detail: > + Essential for any RAG system. Implement the four core metrics as CI gates for retrieval quality. + sort_order: 4 + - name: Guardrails AI + url: https://github.com/guardrails-ai/guardrails + description: > + Input/output validation framework with validators for PII, toxicity, hallucination, and schema compliance. + detail: > + Production safety layer. Study the validator composability and how to build custom domain validators. + sort_order: 5 + - name: Promptfoo + url: https://github.com/promptfoo/promptfoo + description: > + CLI-first eval tool for testing prompts against datasets with assertions and model comparison. + detail: > + Fast iteration tool for prompt engineering. Run prompt A/B tests before deploying changes. + sort_order: 6 + + - id: retrieval + title: RAG & Retrieval + objective: > + Ground your LLM in real data - chunking, retrieval, reranking, and citations that work. + color: "#eb5757" + sort_order: 4 + resources: + - name: LlamaIndex + url: https://github.com/run-llama/llama_index + description: > + Data framework for LLM apps with advanced indexing, retrieval, and query engine abstractions. + detail: > + The most complete RAG toolkit. Study the node parser, retriever, and response synthesizer abstractions. + sort_order: 1 + - name: Pinecone + url: https://www.pinecone.io + description: > + Managed vector database with hybrid search, metadata filtering, and serverless scaling. + detail: > + Production vector DB for teams that want zero ops. Compare cost/latency vs self-hosted alternatives. 
+ sort_order: 2 + - name: Weaviate + url: https://weaviate.io + description: > + Open-source vector database with hybrid search, multi-tenancy, and built-in vectorization modules. + detail: > + Best open-source option for hybrid (vector + keyword) search. Study the GraphQL API and module system. + sort_order: 3 + - name: Cohere Rerank + url: https://cohere.com/rerank + description: > + Cross-encoder reranking model that dramatically improves retrieval precision over bi-encoder search. + detail: > + Always add a reranking step. The lift from retrieve-then-rerank vs retrieve-only is significant. + sort_order: 4 + - name: Unstructured.io + url: https://github.com/Unstructured-IO/unstructured + description: > + Document parsing and chunking for PDFs, HTML, images, and office files with layout-aware extraction. + detail: > + The ingestion layer most RAG tutorials skip. Study table extraction, section detection, and chunk boundary quality. + sort_order: 5 + - name: ColBERT / RAGatouille + url: https://github.com/bclavie/RAGatouille + description: > + Late-interaction retrieval with token-level matching. Better than dense retrieval for domain-specific queries. + detail: > + Study the late-interaction pattern: why per-token matching outperforms single-vector similarity in many domains. + sort_order: 6 + + - id: memory + title: Memory & Personalization + objective: > + Give your AI a memory that persists across sessions and scales with usage. + color: "#5e6ad2" + sort_order: 5 + resources: + - name: Mem0 + url: https://github.com/mem0ai/mem0 + description: > + Memory layer for AI apps with automatic extraction, deduplication, and conflict resolution across conversations. + detail: > + Study the memory extraction pipeline: how raw conversations become structured, queryable memory entries. 
+ sort_order: 1 + - name: Letta (MemGPT) + url: https://github.com/letta-ai/letta + description: > + Stateful agents with tiered memory (core/archival/recall) and self-editing memory management. + detail: > + The canonical reference for LLM self-managed memory. Study the virtual context window and memory paging. + sort_order: 2 + - name: Zep + url: https://github.com/getzep/zep + description: > + Memory server with auto-summarization, entity extraction, temporal awareness, and hybrid search. + detail: > + Good for chat applications needing conversation history compression and entity tracking. + sort_order: 3 + - name: LangMem (LangChain) + url: https://github.com/langchain-ai/langmem + description: > + Memory management for LangGraph agents with semantic memory extraction and consolidation. + detail: > + Study the memory formation patterns: how agents decide what to remember and what to forget. + sort_order: 4 + - name: Cognee + url: https://github.com/topoteretes/cognee + description: > + Knowledge graph memory with ECL pipelines for building structured memory from unstructured data. + detail: > + Study the graph-based memory approach: entities + relations vs flat vector stores for memory retrieval. + sort_order: 5 + - name: OpenAI Memory Research + url: https://openai.com/index/memory-and-new-controls-for-chatgpt/ + description: > + Production memory system in ChatGPT: what it remembers, user controls, and privacy considerations. + detail: > + Study the UX patterns: how to surface memory to users without being creepy or overwhelming. + sort_order: 6 + + - id: fine_tuning + title: Fine-Tuning & Alignment + objective: > + Make a foundation model yours - LoRA, RLHF, and alignment without breaking the bank. + color: "#f472b6" + sort_order: 6 + resources: + - name: Hugging Face PEFT + url: https://github.com/huggingface/peft + description: > + Parameter-efficient fine-tuning with LoRA, QLoRA, prefix tuning, and adapter methods. 
+ detail: > + Start here for any fine-tuning task. LoRA + 4-bit quantization (QLoRA) runs on a single consumer GPU. + sort_order: 1 + - name: Axolotl + url: https://github.com/axolotl-ai-cloud/axolotl + description: > + Streamlined fine-tuning toolkit with YAML configs for LoRA, full fine-tune, DPO, and multi-GPU training. + detail: > + Best developer experience for fine-tuning. One YAML config covers model, dataset, and training params. + sort_order: 2 + - name: Unsloth + url: https://github.com/unslothai/unsloth + description: > + 2-5x faster fine-tuning with 70% less memory via custom CUDA kernels and optimized backpropagation. + detail: > + Drop-in speedup for QLoRA training. Study the memory optimizations to understand the efficiency gains. + sort_order: 3 + - name: TRL (Transformer Reinforcement Learning) + url: https://github.com/huggingface/trl + description: > + RLHF, DPO, PPO, and reward modeling library from Hugging Face for alignment training. + detail: > + The standard library for alignment. Study DPO vs PPO: when direct preference optimization beats full RLHF. + sort_order: 4 + - name: OpenAI Fine-Tuning + url: https://platform.openai.com/docs/guides/fine-tuning + description: > + API-based fine-tuning for GPT-4o and GPT-4o-mini with automatic hyperparameter selection. + detail: > + Easiest path to fine-tuning. Compare API fine-tuning cost/quality vs self-hosted LoRA on open models. + sort_order: 5 + - name: Anthropic Constitutional AI + url: https://www.anthropic.com/research/constitutional-ai-harmlessness-from-ai-feedback + description: > + Self-supervised alignment where the model critiques and revises its own outputs against principles. + detail: > + Study the principle-based alignment approach: how to define behavioral constraints without human labeling. + sort_order: 6 + + - id: multimodal + title: Multimodal Systems + objective: > + Go beyond text - images, audio, video, and cross-modal reasoning in one system. 
+ color: "#4ade80" + sort_order: 7 + resources: + - name: GPT-4o + url: https://openai.com/index/hello-gpt-4o/ + description: > + Natively multimodal model processing text, images, and audio in a unified architecture. + detail: > + The production standard for multimodal. Study how unified tokenization enables cross-modal reasoning. + sort_order: 1 + - name: Google Gemini + url: https://deepmind.google/technologies/gemini/ + description: > + Natively multimodal with long-context (1M+ tokens), video understanding, and code generation. + detail: > + Study the long-context multimodal pattern: processing entire videos and documents in a single call. + sort_order: 2 + - name: LLaVA + url: https://llava-vl.github.io + description: > + Open-source vision-language model connecting CLIP visual encoder to LLaMA language model. + detail: > + The reference architecture for open VLMs. Study the visual instruction tuning dataset and training pipeline. + sort_order: 3 + - name: OpenAI Whisper + url: https://github.com/openai/whisper + description: > + Robust speech recognition across languages, accents, and noise conditions with zero-shot generalization. + detail: > + Production-grade ASR. Study the encoder-decoder architecture and how it handles multilingual transcription. + sort_order: 4 + - name: Twelve Labs + url: https://www.twelvelabs.io + description: > + Video understanding API with temporal search, summarization, and generation from video content. + detail: > + Study how video embeddings differ from image embeddings: temporal awareness and action recognition. + sort_order: 5 + - name: Pixtral / Qwen-VL + url: https://huggingface.co/Qwen/Qwen2.5-VL-72B-Instruct + description: > + Open-weight vision-language models approaching GPT-4o quality on visual reasoning benchmarks. + detail: > + Track the open VLM frontier. Compare Qwen-VL, Pixtral, and InternVL on your domain-specific tasks. 
+ sort_order: 6 + + - id: reasoning + title: Reasoning & Planning + objective: > + Unlock multi-step reasoning - chain-of-thought, tree search, and self-consistency. + color: "#55cdff" + sort_order: 8 + resources: + - name: OpenAI o1 / o3 + url: https://openai.com/index/learning-to-reason-with-llms/ + description: > + Chain-of-thought reasoning models with internal deliberation for math, science, and coding. + detail: > + Study the test-time compute scaling paradigm: spending more inference tokens improves reasoning quality. + sort_order: 1 + - name: Claude Extended Thinking + url: https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking + description: > + Explicit thinking traces in Claude for complex analysis, planning, and multi-step problem solving. + detail: > + Study how exposing the reasoning chain improves transparency and enables better prompt engineering. + sort_order: 2 + - name: DeepSeek-R1 + url: https://github.com/deepseek-ai/DeepSeek-R1 + description: > + Open-weight reasoning model trained with RL to generate detailed chain-of-thought before answering. + detail: > + The open-source reasoning frontier. Study the RL training recipe: how reinforcement learning teaches reasoning. + sort_order: 3 + - name: Tree of Thoughts + url: https://github.com/princeton-nlp/tree-of-thought-llm + description: > + Deliberate problem solving via tree search over reasoning paths with evaluation and backtracking. + detail: > + Study the search-over-thoughts pattern: BFS/DFS over reasoning branches with LLM-as-evaluator. + sort_order: 4 + - name: Self-Consistency (Wang et al.) + url: https://arxiv.org/abs/2203.11171 + description: > + Sample multiple reasoning paths and take majority vote. Simple technique that reliably boosts accuracy. + detail: > + The cheapest reasoning improvement: sample N completions, majority-vote the answer. Works with any model. 
+ sort_order: 5 + - name: LATS (Language Agent Tree Search) + url: https://github.com/laetitia-teo/lats + description: > + Monte Carlo tree search for LLM agents combining reasoning, acting, and planning with environment feedback. + detail: > + Study MCTS for agents: how tree search + LLM evaluation enables planning in complex environments. + sort_order: 6 diff --git a/curriculum/tracks/applied-systems.yaml b/curriculum/tracks/applied-systems.yaml new file mode 100644 index 0000000..662e4b0 --- /dev/null +++ b/curriculum/tracks/applied-systems.yaml @@ -0,0 +1,229 @@ +id: applied-systems +title: Applied Systems +description: > + Production ML systems beyond model training - LLMOps, recommendation engines, + data pipelines, 3D vision, and distributed training at scale. +difficulty: intermediate +track_type: resource +modules: + - id: llmops + title: LLMOps + objective: > + Run LLMs in production without surprises - eval, tracing, guardrails, and reliability. + color: "#55cdff" + sort_order: 1 + resources: + - name: LangSmith + url: https://www.langchain.com/langsmith + description: > + Tracing, evaluation datasets, and regression checks for LLM apps. + detail: > + Use to practice observability and experiment comparison. + sort_order: 1 + - name: Langfuse + url: https://langfuse.com + description: > + Open-source LLM observability, prompts, and scoring pipelines. + detail: > + Use to drill telemetry schema and incident debugging. + sort_order: 2 + - name: Arize Phoenix + url: https://phoenix.arize.com + description: > + LLM tracing, retrieval analysis, and quality diagnostics. + detail: > + Use for retrieval error triage and quality root-cause analysis. + sort_order: 3 + - name: Weights & Biases Weave + url: https://wandb.ai/site/weave + description: > + Model/app evaluation and experiment tracking for LLM workflows. + detail: > + Use for side-by-side prompt/pipeline iteration practice. 
+ sort_order: 4 + - name: Guardrails AI + url: https://www.guardrailsai.com + description: > + Validation and policy guardrails for structured LLM outputs. + detail: > + Use to rehearse fail-safe output enforcement patterns. + sort_order: 5 + + - id: recsys + title: RecSys + objective: > + Build recommendations that actually convert - retrieval, ranking, and experimentation. + color: "#ffc47c" + sort_order: 2 + resources: + - name: Recommender Systems Handbook + url: https://link.springer.com/book/10.1007/978-1-4899-7637-6 + description: > + Classical and modern recommendation methods with system context. + detail: > + Use as foundational theory for interview tradeoff answers. + sort_order: 1 + - name: ACM RecSys Conference + url: https://recsys.acm.org + description: > + Current research trends in ranking, retrieval, and personalization. + detail: > + Use to stay current on evaluation and modeling directions. + sort_order: 2 + - name: NVIDIA Merlin + url: https://developer.nvidia.com/merlin + description: > + Industrial recommendation stack patterns and tooling. + detail: > + Use for practical pipeline architecture examples. + sort_order: 3 + - name: Eugene Yan (Applied RecSys) + url: https://eugeneyan.com + description: > + Production recommendation and search system case studies. + detail: > + Use for interview-ready system narratives and metrics framing. + sort_order: 4 + - name: Shaped Blog + url: https://www.shaped.ai/blog + description: > + Modern personalization, retrieval-ranking stacks, and online learning. + detail: > + Use for current production patterns and tradeoff examples. + sort_order: 5 + + - id: dataops + title: DataOps + objective: > + Keep your data pipelines healthy - orchestration, quality, lineage, and reliability. + color: "#5bb86e" + sort_order: 3 + resources: + - name: Apache Airflow + url: https://airflow.apache.org + description: > + Workflow orchestration patterns, scheduling, and operational controls. 
+ detail: > + Use to practice DAG design and failure recovery patterns. + sort_order: 1 + - name: dbt Docs + url: https://docs.getdbt.com + description: > + Transformation modeling, testing, and analytics engineering workflow. + detail: > + Use to drill model contracts, tests, and deployment workflows. + sort_order: 2 + - name: Dagster + url: https://dagster.io + description: > + Asset-oriented orchestration and data platform software design. + detail: > + Use for lineage-aware orchestration and asset health concepts. + sort_order: 3 + - name: DataHub + url: https://datahubproject.io + description: > + Metadata platform and end-to-end lineage across data assets. + detail: > + Use for catalog, ownership, and governance interview scenarios. + sort_order: 4 + - name: Great Expectations + url: https://greatexpectations.io + description: > + Automated data quality assertions and validation pipelines. + detail: > + Use for reliability guardrails and data contract enforcement. + sort_order: 5 + + - id: 3d_vision + title: 3D Vision + objective: > + Reconstruct and understand 3D scenes - NeRFs, Gaussian Splatting, and spatial AI. + color: "#f472b6" + sort_order: 4 + resources: + - name: Nerfstudio + url: https://nerf.studio + description: > + Modular framework for NeRF development - training, visualization, and export of neural radiance fields. + detail: > + Hands-on practice with NeRF pipelines and scene reconstruction. + sort_order: 1 + - name: gsplat + url: https://docs.gsplat.studio + description: > + Optimized 3D Gaussian Splatting library for real-time novel view synthesis. + detail: > + Study why Gaussian Splatting is overtaking NeRFs for real-time applications. + sort_order: 2 + - name: Open3D + url: http://www.open3d.org + description: > + Open-source library for 3D data processing - point clouds, meshes, RGB-D images, and visualization. + detail: > + Essential toolkit for 3D computer vision pipelines. 
+ sort_order: 3 + - name: Habitat (Meta AI) + url: https://aihabitat.org + description: > + High-performance 3D simulation platform for embodied AI research - navigation, manipulation, and rearrangement. + detail: > + Study the sim-to-real pipeline and how 3D understanding enables robot navigation. + sort_order: 4 + - name: 3D Gaussian Splatting (Original Paper) + url: https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/ + description: > + The seminal paper on representing scenes as collections of 3D Gaussians for real-time rendering. + detail: > + Read for the core algorithm: why Gaussians, how splatting works, and the quality-speed tradeoff vs NeRFs. + sort_order: 5 + + - id: distributed_ml + title: Distributed ML + objective: > + Train models across hundreds of GPUs without losing your mind - parallelism and fault tolerance. + color: "#4ade80" + sort_order: 5 + resources: + - name: DeepSpeed + url: https://www.deepspeed.ai + description: > + Microsoft's distributed training library: ZeRO stages, pipeline parallelism, mixed precision, and inference optimization. + detail: > + Study ZeRO-1/2/3 memory partitioning - it's the most asked-about distributed training concept. + sort_order: 1 + - name: Megatron-LM + url: https://github.com/NVIDIA/Megatron-LM + description: > + NVIDIA's framework for training multi-billion parameter models with tensor and pipeline parallelism. + detail: > + Study tensor parallelism (column/row splitting) and pipeline parallelism (micro-batching). + sort_order: 2 + - name: PyTorch FSDP + url: https://pytorch.org/docs/stable/fsdp.html + description: > + Fully Sharded Data Parallel - PyTorch-native ZeRO-3 implementation for large model training. + detail: > + Know FSDP wrapping policies, mixed precision, and activation checkpointing. 
+ sort_order: 3 + - name: Ray Train + url: https://docs.ray.io/en/latest/train/train.html + description: > + Distributed training orchestration with fault tolerance, elastic scaling, and multi-framework support. + detail: > + Study how Ray abstracts distributed training across heterogeneous clusters. + sort_order: 4 + - name: Horovod + url: https://horovod.ai + description: > + Uber's distributed training framework using ring-AllReduce for efficient gradient synchronization. + detail: > + Understand ring-AllReduce vs tree-AllReduce and how Horovod simplifies multi-GPU training. + sort_order: 5 + - name: Scaling Laws (Chinchilla / Kaplan et al.) + url: https://arxiv.org/abs/2203.15556 + description: > + Compute-optimal training: how to allocate compute budget between model size and data volume. + detail: > + Know the Chinchilla ratio (~20 tokens per parameter) and how it changed LLM training strategies. + sort_order: 6 diff --git a/curriculum/tracks/behavioral-design.yaml b/curriculum/tracks/behavioral-design.yaml new file mode 100644 index 0000000..9ac22cd --- /dev/null +++ b/curriculum/tracks/behavioral-design.yaml @@ -0,0 +1,317 @@ +id: behavioral-design +title: Behavioral Design +description: > + Persuasion frameworks, engagement loops, and product psychology patterns. +difficulty: intermediate +track_type: resource +modules: + - id: frameworks + title: Frameworks + objective: > + The psychology models behind why people stay, buy, and come back. + sort_order: 1 + resources: + - name: "Fogg Behavior Model (B=MAP) - BJ Fogg" + description: "You don't need to increase motivation if you make the behavior easy enough and trigger it at the right time." + detail: "One-tap likes, frictionless sharing, push notifications timed to high-motivation moments (loneliness, boredom)." + resource_type: knowledge + sort_order: 1 + - name: "Hook Model - Nir Eyal" + description: "Variable rewards create compulsive checking. 
The investment phase (profile building, follower accumulation) increases switching costs." + detail: "Instagram notification -> open app -> scroll feed (variable content) -> post/comment (investment in social graph)." + resource_type: knowledge + sort_order: 2 + - name: "Operant Conditioning - B.F. Skinner" + description: "Slot machines and social feeds use identical reward schedules. Unpredictable rewards are more addictive than predictable ones." + detail: "Pull-to-refresh (slot machine lever), algorithmic feed randomization, unpredictable like counts." + resource_type: knowledge + sort_order: 3 + - name: "Dual Process Theory - Daniel Kahneman" + description: "Design for System 1 and users will act before they think. Infinite scroll, autoplay, and one-click actions all bypass deliberation." + detail: "Autoplay next episode, infinite scroll with no stopping cue, swipe-based interactions (Tinder, TikTok)." + resource_type: knowledge + sort_order: 4 + - name: "Six Principles of Persuasion - Robert Cialdini" + description: "Social proof (like counts, view counts) and scarcity (limited-time offers, disappearing content) are the most weaponized in tech." + detail: "\"X people are viewing this\" (Booking.com), disappearing stories (Snapchat), follower counts as authority signals." + resource_type: knowledge + sort_order: 5 + + - id: feed_design + title: "Feed & Content" + objective: > + How feeds keep you scrolling - ranking, sequencing, and content packaging tactics. + sort_order: 2 + resources: + - name: Infinite Scroll + description: "Content loads endlessly with no natural stopping point, eliminating the pause that would trigger a conscious decision to stop." + detail: "Removes completion cues. The brain never receives a 'finished' signal, so the default behavior is to keep going." + resource_type: knowledge + sort_order: 1 + - name: Autoplay + description: "Next video/episode starts automatically. Stopping requires active effort; continuing is passive." 
+ detail: "Exploits status quo bias and loss aversion. Opting out feels like losing something vs. doing nothing." + resource_type: knowledge + sort_order: 2 + - name: Pull-to-Refresh + description: "Physical gesture that mimics a slot machine lever pull, creating a tactile reward loop." + detail: "Variable-ratio reinforcement. Each pull might reveal new content (reward) or not, creating compulsive repetition." + resource_type: knowledge + sort_order: 3 + - name: Algorithmic Feed + description: "Content ordered by engagement prediction rather than chronology. Shows you what maximizes time-on-app, not what's newest." + detail: "Collaborative filtering + reinforcement learning. Optimizes for engagement proxies (clicks, dwell time, shares) which correlate with emotional arousal." + resource_type: knowledge + sort_order: 4 + - name: Short-Form Vertical Video + description: "Bite-sized content in full-screen vertical format that requires minimal cognitive investment per unit." + detail: "Low effort per item + variable reward per swipe = highest dopamine-per-minute of any content format." + resource_type: knowledge + sort_order: 5 + - name: Sound-On Default + description: "Auto-playing audio captures attention involuntarily and increases emotional engagement with content." + detail: "Audio is processed pre-attentively - you react to sound before you consciously decide to. Increases dwell time significantly." + resource_type: knowledge + sort_order: 6 + - name: Clickbait / Curiosity Gap + description: "Headlines that open an information gap without closing it, creating an irresistible urge to click." + detail: "Loewenstein's information gap theory: the brain treats an open question as an unresolved tension that must be closed." + resource_type: knowledge + sort_order: 7 + - name: Engagement Bait Algorithms + description: "Recommendation systems that learn individual vulnerabilities and exploit them to maximize session length." 
+ detail: "Multi-armed bandit optimization that treats each user as an independent exploitation problem. Finds your specific triggers." + resource_type: knowledge + sort_order: 8 + + - id: social_loops + title: Social Loops + objective: > + Why you keep coming back - identity, reciprocity, and social pressure loops. + sort_order: 3 + resources: + - name: Like Counts / Reactions + description: "Public quantification of social approval that creates a dopamine feedback loop for both poster and audience." + detail: "Social comparison theory + variable reward. Each check might show new likes (or not), creating compulsive monitoring." + resource_type: knowledge + sort_order: 1 + - name: View / Play Counts + description: "Public metrics that create social proof and drive content creators into optimization loops." + detail: "Social proof (Cialdini) - high view counts signal value, creating a winner-take-all attention economy." + resource_type: knowledge + sort_order: 2 + - name: Read Receipts / Typing Indicators + description: "Showing when someone has seen your message creates social obligation to respond immediately." + detail: "Exploits reciprocity norm and social anxiety. Leaving a message 'on read' feels like a social violation." + resource_type: knowledge + sort_order: 3 + - name: Streaks + description: "Consecutive-day usage counters that create artificial commitment through loss aversion." + detail: "Loss aversion is 2x stronger than equivalent gains. A 100-day streak feels too valuable to break, even if the activity itself has no value." + resource_type: knowledge + sort_order: 4 + - name: Follower / Friend Counts + description: "Public social capital metrics that create status hierarchies and competitive accumulation." + detail: "Status games + quantified social comparison. The number becomes the goal, detached from actual relationship quality." 
+ resource_type: knowledge + sort_order: 5 + - name: Social Reciprocity Triggers + description: "Notifications that someone interacted with you, creating obligation to interact back." + detail: "Cialdini's reciprocity principle: receiving creates a felt obligation to give back. Platforms engineer mutual obligations." + resource_type: knowledge + sort_order: 6 + + - id: variable_rewards + title: Variable Rewards + objective: > + The slot machine in every app - unpredictable rewards that build habits. + sort_order: 4 + resources: + - name: Loot Box / Gacha Mechanics + description: "Randomized rewards with varying rarity that create gambling-like compulsion loops." + detail: "Variable-ratio reinforcement (Skinner). The unpredictability of reward magnitude drives compulsive repetition. Identical to slot machines." + resource_type: knowledge + sort_order: 1 + - name: Disappearing Content (Stories) + description: "Content that expires after 24 hours, creating urgency and FOMO that drives frequent checking." + detail: "Scarcity principle (Cialdini) + loss aversion. Content that disappears feels more valuable. Missing it feels like loss." + resource_type: knowledge + sort_order: 2 + - name: Feed Refresh Randomization + description: "Each feed refresh shows different content, making each visit a unique 'pull' of the slot machine." + detail: "Intermittent reinforcement. Sometimes the feed is boring, sometimes it's perfect - the uncertainty is what creates compulsion." + resource_type: knowledge + sort_order: 3 + - name: Micro-Reward Variability + description: "Small, unpredictable rewards scattered throughout the experience - new followers, likes, comments arriving at random intervals." + detail: "Each notification is a mini-reward with uncertain timing and magnitude. The brain treats the phone as a reward-dispensing device." 
+ resource_type: knowledge + sort_order: 4 + + - id: friction + title: "Friction & Dark Patterns" + objective: > + How products make it hard to leave - friction design and dark patterns. + sort_order: 5 + resources: + - name: Friction Asymmetry + description: "Signing up is one click; deleting your account requires 15 steps, phone calls, and waiting periods." + detail: "Asymmetric friction design - make desired behaviors frictionless and undesired behaviors maximally effortful." + resource_type: knowledge + sort_order: 1 + - name: Confirm-shaming + description: "Opt-out text designed to make the user feel bad about their choice. 'No, I don't want to save money.'" + detail: "Social pressure + loss framing. The opt-out is phrased as self-harm, making the user feel stupid for declining." + resource_type: knowledge + sort_order: 2 + - name: Default-On Settings + description: "Privacy-invasive, attention-capturing, or monetization features are enabled by default. Changing requires finding buried settings." + detail: "Status quo bias (Kahneman). 90%+ of users never change defaults. The default IS the choice for most people." + resource_type: knowledge + sort_order: 3 + - name: One-Click Purchase + description: "Removing deliberation time from buying decisions. The faster the purchase, the less rational evaluation occurs." + detail: "Eliminates the cooling-off period where System 2 might intervene. Impulse buying becomes the default mode." + resource_type: knowledge + sort_order: 4 + - name: Roach Motel Pattern + description: "Easy to get into, nearly impossible to get out of. Subscriptions, accounts, data exports deliberately obstructed." + detail: "Sunk cost fallacy + procedural friction. Users stay because leaving is too painful, not because the product is good." + resource_type: knowledge + sort_order: 5 + + - id: notifications + title: "Notifications & Lock-in" + objective: > + The science of pulling you back in - notification timing, triggers, and lock-in. 
+ sort_order: 6 + resources: + - name: Push Notification Batching + description: "Strategically timing notifications to arrive when users are most likely to re-engage (morning, lunch, evening)." + detail: "Fogg's prompt timing - trigger must arrive at a high-motivation moment. ML models predict optimal send times per user." + resource_type: knowledge + sort_order: 1 + - name: FOMO Notifications + description: "'Your friend just posted for the first time in a while,' 'You have unseen memories,' 'Trending near you.'" + detail: "Fear of missing out exploits social belonging needs. Missing content feels like missing a social event." + resource_type: knowledge + sort_order: 2 + - name: Badge Counts (Red Dots) + description: "Unread counters on app icons that create visual tension demanding resolution." + detail: "Zeigarnik effect - incomplete tasks create psychological tension. The red badge is an unresolved task that demands attention." + resource_type: knowledge + sort_order: 3 + - name: Time-Delayed Notifications + description: "Holding back notifications to deliver them strategically rather than in real-time." + detail: "Creates unpredictable reward timing. User never knows when the next notification will arrive, increasing checking behavior." + resource_type: knowledge + sort_order: 4 + - name: Recommendation Engines + description: "Algorithmic content suggestions that create an endless tunnel of 'next' items to consume." + detail: "Collaborative filtering finds patterns across millions of users to predict what will keep YOU specifically engaged longest." + resource_type: knowledge + sort_order: 5 + - name: Collaborative Filtering + description: "'Users like you also watched/bought/liked...' - leveraging collective behavior to predict individual preferences." + detail: "Social proof at scale. The recommendation feels personalized but is actually a statistical prediction from aggregate behavior." 
+ resource_type: knowledge + sort_order: 6 + - name: Taste Profiles / Filter Bubbles + description: "Increasingly narrow content personalization that traps users in comfortable echo chambers." + detail: "Confirmation bias amplification. The algorithm shows you more of what you already engage with, creating a shrinking worldview." + resource_type: knowledge + sort_order: 7 + - name: Social Graph Lock-in + description: "Your social connections become the switching cost. Leaving the platform means losing access to your network." + detail: "Network effects + sunk cost. Years of relationship building become hostage. The platform doesn't need to be good, just irreplaceable." + resource_type: knowledge + sort_order: 8 + + - id: gamification + title: "Gamification & Temporal" + objective: > + Streaks, progress bars, and time pressure - the mechanics of long-term engagement. + sort_order: 7 + resources: + - name: Progress Bars / Completion Loops + description: "Visual indicators showing how close you are to completing a profile, level, or achievement." + detail: "Goal gradient effect - effort increases as you approach a goal. An 80% complete profile feels like unfinished business." + resource_type: knowledge + sort_order: 1 + - name: Badges & Achievements + description: "Virtual rewards for platform-desired behaviors that create a collection instinct." + detail: "Operant conditioning + completionism. Each badge is a micro-reward that shapes behavior toward platform goals." + resource_type: knowledge + sort_order: 2 + - name: Leaderboards / Rankings + description: "Public competitive rankings that exploit status-seeking and social comparison." + detail: "Social comparison theory (Festinger). Rankings create winners and losers, driving both groups to increase engagement." + resource_type: knowledge + sort_order: 3 + - name: Creator Monetization + description: "Revenue sharing that turns users into content-producing employees with variable compensation." 
+ detail: "Variable-ratio reinforcement applied to income. Viral posts create intermittent large rewards, driving compulsive content creation." + resource_type: knowledge + sort_order: 4 + - name: Hiding Time Indicators + description: "Removing or minimizing clock displays, session length indicators, and usage data from the interface." + detail: "Time blindness. Without temporal cues, users lose track of how long they've been engaged. The session extends unnoticed." + resource_type: knowledge + sort_order: 5 + - name: Removing Endpoints + description: "Eliminating natural stopping points in the content consumption experience." + detail: "Without a 'done' signal, the default is to continue. Removing pagination, episode counts, and 'end of feed' signals keeps users going." + resource_type: knowledge + sort_order: 6 + - name: Live / Ephemeral Content + description: "Real-time content (live streams, live events) that creates urgency through irreversibility." + detail: "Scarcity + FOMO. Live content can't be consumed later (or feels lesser when recorded). Missing it is permanent." + resource_type: knowledge + sort_order: 7 + - name: Limited-Time Events + description: "Seasonal events, flash sales, and time-bounded content that create artificial urgency." + detail: "Scarcity principle + loss aversion. Time pressure disables deliberate thinking and triggers impulsive action." + resource_type: knowledge + sort_order: 8 + + - id: case_studies + title: Case Studies + objective: > + How TikTok, Instagram, Duolingo, and others apply these patterns in the real world. 
+ sort_order: 8 + resources: + - name: "Meta (Facebook/Instagram)" + description: "The social comparison machine" + detail: "News Feed Algorithm; Instagram's Social Comparison Engine; Growth Team Tactics" + resource_type: knowledge + sort_order: 1 + - name: TikTok + description: "The attention singularity" + detail: "For You Page Algorithm; Full-Screen Vertical Format; Creator Incentive Structure" + resource_type: knowledge + sort_order: 2 + - name: YouTube + description: "The radicalization pipeline" + detail: "Recommendation Algorithm; Autoplay + Up Next; Thumbnail/Title Optimization" + resource_type: knowledge + sort_order: 3 + - name: Snapchat + description: "The teen engagement machine" + detail: "Streaks; Snap Map; Disappearing Content" + resource_type: knowledge + sort_order: 4 + - name: Apple + description: "The ecosystem lock-in architect" + detail: "iMessage Blue Bubble; Ecosystem Interdependence; App Store Control" + resource_type: knowledge + sort_order: 5 + - name: Amazon + description: "The friction elimination machine" + detail: "1-Click Buy; Prime Membership; Dark Patterns in Cancellation" + resource_type: knowledge + sort_order: 6 + - name: Netflix + description: "The binge engineering pioneer" + detail: "Autoplay Everything; Personalized Artwork; Removal of Episode Counts" + resource_type: knowledge + sort_order: 7 diff --git a/curriculum/tracks/bio-augmentation.yaml b/curriculum/tracks/bio-augmentation.yaml new file mode 100644 index 0000000..be4e21b --- /dev/null +++ b/curriculum/tracks/bio-augmentation.yaml @@ -0,0 +1,385 @@ +id: bio-augmentation +title: Bio-Augmentation +description: > + Human augmentation from biology to brain-computer interfaces - foundational + biology, neurotech, wearables, biohacking, live translation, and the convergence + of human-machine fusion. 
+difficulty: intermediate +track_type: resource +modules: + - id: foundations + title: Bio Foundations + objective: > + The biology you need to understand before hacking it - cells, DNA, and the + brain. + color: "#55cdff" + sort_order: 1 + resources: + - name: MIT OpenCourseWare - Biology (7.013) + url: https://ocw.mit.edu/courses/7-013-introductory-biology-spring-2018/ + description: > + Introductory biology covering cell structure, genetics, molecular biology, + and biochemistry. + detail: > + Start here. Covers DNA replication, gene expression, protein synthesis - the + vocab you need for everything else. + sort_order: 1 + - name: Khan Academy - Biology & Genetics + url: https://www.khanacademy.org/science/biology + description: > + Visual, step-by-step biology from cells to genetics to human physiology. + detail: > + Use for gap-filling. Jump to the specific unit you need (genetics, molecular + bio, human body systems). + sort_order: 2 + - name: Neuroscience Online (UTHealth) + url: https://nba.uth.tmc.edu/neuroscience/ + description: > + Free open-access neuroscience textbook covering neurons, synapses, sensory + systems, and motor control. + detail: > + Essential for understanding BCI. Focus on chapters 1-4 (cellular + neuroscience) and 8 (somatosensory). + sort_order: 3 + - name: NHGRI Genomics Education + url: https://www.genome.gov/about-genomics + description: > + NIH's primer on genomics: DNA, genes, chromosomes, genetic variation, and + personalized medicine. + detail: > + Read the fact sheets first. Covers CRISPR, gene therapy, pharmacogenomics in + plain language. + sort_order: 4 + - name: Molecular Biology of the Cell (Alberts) + url: https://www.ncbi.nlm.nih.gov/books/NBK21054/ + description: > + The standard reference textbook for cell and molecular biology, freely + available via NCBI. + detail: > + Deep reference. Read chapters 4-7 (DNA, gene expression, proteins) when you + need real depth. 
+ sort_order: 5 + - name: 3Blue1Brown - Biology Essentials + url: https://www.youtube.com/c/3blue1brown + description: > + Visual math and science explanations. Look for videos on information theory, + neural networks, and signal processing. + detail: > + Not biology-specific but the mathematical intuition applies directly to + biosignal processing and neural coding. + sort_order: 6 + + - id: neurotech + title: Neurotech & BCI + objective: > + Read and write to the brain directly - BCIs, neural implants, and where the + field is heading. + color: "#ffc47c" + sort_order: 2 + resources: + - name: Neuralink + url: https://neuralink.com + description: > + Implantable BCI with 1,024 electrodes across 64 flexible threads. First human trials ongoing for + paralysis patients. + detail: > + Track the N1 implant progress - electrode count, signal quality, and the + robotic insertion system. + sort_order: 1 + - name: Synchron Stentrode + url: https://synchron.com + description: > + Endovascular BCI inserted via blood vessels - no open brain surgery required. + FDA breakthrough device. + detail: > + Study the less-invasive approach: stent-based electrode sits in a blood + vessel near motor cortex. + sort_order: 2 + - name: BrainGate + url: https://www.braingate.org + description: > + Academic BCI research consortium. Pioneered intracortical recording for + cursor control and robotic arm operation. + detail: > + Read the published papers - they are the scientific foundation for commercial + BCIs like Neuralink. + sort_order: 3 + - name: Kernel Flow + url: https://www.kernel.co + description: > + Non-invasive neuroimaging headset using time-domain fNIRS to measure brain + hemodynamics. + detail: > + Study the non-invasive BCI approach - lower resolution but no surgery. + Important for consumer applications. + sort_order: 4 + - name: Paradromics + url: https://paradromics.com + description: > + High-bandwidth implantable BCI targeting speech restoration with 65,000+ + electrode channels. 
+ detail: > + Track the high-channel-count approach - more data from the brain means + richer control signals. + sort_order: 5 + - name: OpenBCI + url: https://openbci.com + description: > + Open-source EEG hardware and software for brain-computer interface research + and development. + detail: > + Hands-on entry point. Buy the Cyton board and start reading your own EEG + signals. + sort_order: 6 + + - id: wearables + title: Wearables & Sensors + objective: > + Turn your body into a data stream - wearables, biosensors, and continuous + health monitoring. + color: "#5bb86e" + sort_order: 3 + resources: + - name: Whoop + url: https://www.whoop.com + description: > + Continuous strain, recovery, and sleep tracking via PPG and accelerometer. + Used by elite athletes. + detail: > + Study the recovery score algorithm - HRV, resting HR, respiratory rate, + sleep quality combined. + sort_order: 1 + - name: Oura Ring + url: https://ouraring.com + description: > + Ring-form-factor health tracker measuring sleep stages, HRV, SpO2, skin + temperature, and activity. + detail: > + Best sleep tracker on the market. Study how temperature trends predict + illness and cycle phases. + sort_order: 2 + - name: Apple Watch Health Sensors + url: https://www.apple.com/apple-watch/health/ + description: > + ECG, blood oxygen, temperature sensing, crash detection. FDA-cleared + medical-grade sensors in a consumer device. + detail: > + Track Apple's health sensor roadmap - blood pressure and glucose monitoring + are coming. + sort_order: 3 + - name: Dexcom CGM + url: https://www.dexcom.com + description: > + Continuous glucose monitoring - real-time blood sugar tracking via + subcutaneous sensor. + detail: > + Even non-diabetics use CGMs. Study how real-time glucose data changes eating + and exercise behavior. 
+ sort_order: 4 + - name: Levels Health + url: https://www.levels.com + description: > + Metabolic health platform built on CGM data - glucose response scoring and + dietary optimization. + detail: > + Study the data layer on top of hardware: how raw sensor data becomes + actionable health insights. + sort_order: 5 + - name: Muse Headband + url: https://choosemuse.com + description: > + Consumer EEG headband for meditation and neurofeedback with real-time brain + state visualization. + detail: > + Entry-level brain sensing. Limited but useful for understanding EEG signals + and neurofeedback loops. + sort_order: 6 + + - id: biohacking + title: Biohacking + objective: > + Optimize your body systematically - sleep, nutrition, longevity, and + performance protocols. + color: "#eb5757" + sort_order: 4 + resources: + - name: Huberman Lab Podcast + url: https://www.hubermanlab.com + description: > + Stanford neuroscientist covering sleep, focus, hormones, exercise science, + and brain optimization protocols. + detail: > + The single best source for evidence-based protocols. Start with sleep, light + exposure, and dopamine episodes. + sort_order: 1 + - name: Peter Attia - The Drive + url: https://peterattiamd.com + description: > + Longevity medicine: exercise, nutrition, sleep, pharmacology, and healthspan + optimization frameworks. + detail: > + Deep dives on Zone 2 training, VO2max, cancer screening, and the longevity + framework from his book Outlive. + sort_order: 2 + - name: Bryan Johnson Blueprint + url: https://blueprint.bryanjohnson.com + description: > + Extreme longevity protocol: 100+ supplements, strict diet, biomarker + tracking, organ-age measurements. + detail: > + Study as the extreme end of the spectrum. The data collection methodology is + more valuable than the specific protocols. 
+ sort_order: 3 + - name: Examine.com + url: https://examine.com + description: > + Evidence-based supplement and nutrition database with effect sizes, dosages, + and study quality ratings. + detail: > + Always check here before taking any supplement. Look at the Human Effect + Matrix for real evidence. + sort_order: 4 + - name: Skin Care by Hyram / Dr. Dray + url: https://www.youtube.com/@DrDray + description: > + Dermatologist covering evidence-based skincare: retinoids, SPF, active + ingredients, and anti-aging science. + detail: > + Cut through beauty marketing. Focus on the core actives: retinol, vitamin C, + niacinamide, SPF, AHA/BHA. + sort_order: 5 + - name: FoundMyFitness (Rhonda Patrick) + url: https://www.foundmyfitness.com + description: > + Micronutrient science, sauna protocols, cold exposure, genetics-based health + optimization. + detail: > + Deeper biochemistry than Huberman. Study the sulforaphane, omega-3, and + sauna/cold exposure episodes. + sort_order: 6 + + - id: translation + title: Live Translation + objective: > + Speak any language in real time - translation devices and multilingual AI for + your daily life. + color: "#5e6ad2" + sort_order: 5 + resources: + - name: Meta SeamlessM4T + url: https://ai.meta.com/research/seamless-communication/ + description: > + Multimodal translation model supporting speech-to-speech, speech-to-text, + and text-to-speech across 100+ languages. + detail: > + State of the art for multilingual AI. Study the architecture - it handles + EN/FR/ZH in a single model. + sort_order: 1 + - name: Google Translate + Pixel Buds + url: https://store.google.com/category/earbuds + description: > + Real-time conversation translation via earbuds. Google Translate supports 133 + languages with neural MT. + detail: > + The consumer baseline. Test the conversation mode with French and Chinese - + note latency and accuracy limits. 
+ sort_order: 2 + - name: Timekettle Translator Earbuds + url: https://www.timekettle.co + description: > + Dedicated translation earbuds (WT2, M3) supporting 40+ languages with + bidirectional real-time translation. + detail: > + Purpose-built hardware vs phone-based translation. Study the dual-earpiece + sharing mode for conversations. + sort_order: 3 + - name: DeepL Translator + url: https://www.deepl.com + description: > + Highest-quality text translation, particularly strong for European languages + and increasingly for Chinese. + detail: > + Best for written translation quality. Compare its FR-EN and ZH-EN output + against Google Translate. + sort_order: 4 + - name: Whisper (OpenAI) + url: https://openai.com/research/whisper + description: > + Open-source speech recognition model supporting 99 languages with strong + multilingual performance. + detail: > + The ASR backbone for many translation pipelines. Study how it handles + code-switching (mixing FR/EN/ZH). + sort_order: 5 + - name: NLLB (No Language Left Behind) + url: https://ai.meta.com/research/no-language-left-behind/ + description: > + Meta's open-source translation model covering 200+ languages, especially + low-resource languages. + detail: > + Understand the training methodology. Important for the multilingual AI + landscape beyond just EN/FR/ZH. + sort_order: 6 + + - id: convergence + title: Convergence + objective: > + Where all of this converges - augmented humans and the near-future of + human-machine fusion. + color: "#f472b6" + sort_order: 6 + resources: + - name: Neuralink + AI Agents + url: https://waitbutwhy.com/2017/04/neuralink.html + description: > + Tim Urban's deep dive on Neuralink and the case for brain-computer bandwidth + as the bottleneck for human-AI symbiosis. + detail: > + Essential reading. Covers the biological stack from neurons to cortex and why + bandwidth matters. 
+ sort_order: 1 + - name: Ray Kurzweil - The Singularity Is Nearer + url: https://www.singularityisnearer.com + description: > + Updated predictions on human-machine merger timelines, nanobots, brain + uploading, and exponential technology curves. + detail: > + Read critically. The timeline predictions are optimistic but the technology + convergence analysis is valuable. + sort_order: 2 + - name: DARPA Biological Technologies Office + url: https://www.darpa.mil/about/offices/bto + description: > + Military R&D programs in neural interfaces, biosensors, human performance + enhancement, and bio-machine interfaces. + detail: > + DARPA funds the frontier. Track programs like N3 (non-surgical neural + interfaces) and BTO biosecurity work. + sort_order: 3 + - name: Humanity+ / World Transhumanist Association + url: https://www.humanityplus.org + description: > + Transhumanist community and ethics framework for human enhancement + technologies. + detail: > + Study the ethical frameworks. Augmentation raises questions about access, + inequality, identity, and regulation. + sort_order: 4 + - name: Biohacking Village (DEF CON) + url: https://www.villageb.io + description: > + DEF CON community exploring security and hacking of medical devices, + implants, and biosensors. + detail: > + The security angle. When your body runs software, attack surfaces become + deeply personal. + sort_order: 5 + - name: IEEE Brain Initiative + url: https://brain.ieee.org + description: > + IEEE's standards and research coordination for neurotechnology, brain data, + and neuroethics. + detail: > + Track the standardization efforts. Interoperability standards will shape who + can build for the brain. 
+ sort_order: 6 diff --git a/curriculum/tracks/cognitive-toolkit.yaml b/curriculum/tracks/cognitive-toolkit.yaml new file mode 100644 index 0000000..0a1431d --- /dev/null +++ b/curriculum/tracks/cognitive-toolkit.yaml @@ -0,0 +1,1038 @@ +id: cognitive-toolkit +title: Cognitive Toolkit +description: > + Mental models, decision frameworks, and cognitive techniques for better thinking. +difficulty: intermediate +track_type: resource +modules: + - id: foundation + title: Foundation + objective: > + The mental models that shape how people decide, buy, and follow. + sort_order: 1 + resources: + - name: "Daniel Kahneman - Thinking, Fast and Slow" + description: "The two systems: fast/emotional vs slow/rational. Your character knows most people run on System 1. He designs for System 1." + resource_type: knowledge + sort_order: 1 + - name: "Richard Thaler & Cass Sunstein - Nudge" + description: "\"Choice architecture\" - you don't force people, you design the environment so the desired choice is the path of least resistance. Your character calls himself a \"choice architect.\"" + resource_type: knowledge + sort_order: 2 + - name: "Captology (B.J. Fogg, Stanford)" + description: "The science of persuasive technology - designing software to be psychologically compulsive. Your character's product team has a captology expert." + resource_type: knowledge + sort_order: 3 + - name: "Gregory Bateson - Steps to an Ecology of Mind" + description: "Change the environment, change the mind. Your character doesn't try to convince anyone - he reshapes the information ecology." + resource_type: knowledge + sort_order: 4 + - name: "Gustave Le Bon - Psychology of Crowds" + description: "Crowds are emotional, suggestible, and need simple narratives. Your character designs for crowds, not individuals." + resource_type: knowledge + sort_order: 5 + + - id: operating_system + title: Operating System + objective: > + Build systems that self-correct - feedback loops and adaptive behavioral control. 
+ sort_order: 2 + resources: + - name: Measure + description: "People's behavior, emotions, attention (the data)." + resource_type: knowledge + sort_order: 1 + - name: Adjust + description: "Tweak the algorithm, the feed, the notification timing." + resource_type: knowledge + sort_order: 2 + - name: Observe + description: "Did they comply? Did engagement go up? Did resistance drop?" + resource_type: knowledge + sort_order: 3 + - name: Push + description: "Go further. Test the next boundary." + resource_type: knowledge + sort_order: 4 + - name: Repeat + description: "The loop never stops." + resource_type: knowledge + sort_order: 5 + + - id: techniques + title: Techniques + objective: > + Practical techniques for capturing attention and building habits that stick. + sort_order: 3 + resources: + - name: Attention economy + description: "Treats human attention as an extractable resource - the \"new oil.\"" + detail: "Euphemism: Engagement metrics" + resource_type: knowledge + sort_order: 1 + - name: Persona management software + description: "Fake profiles, bot armies, manufactured consensus. AI-powered = scales to millions." + detail: "Euphemism: Community management tools" + resource_type: knowledge + sort_order: 2 + - name: Astroturfing + description: "Manufacturing the appearance of popular support." + detail: "Euphemism: Grassroots activation / organic growth campaigns" + resource_type: knowledge + sort_order: 3 + - name: Nudge architecture + description: "Designing so people do what you want while feeling they chose it." + detail: "Euphemism: UX optimization / default settings" + resource_type: knowledge + sort_order: 4 + - name: Captology + description: "Slot-machine psychology: variable reward schedules, streaks, notifications timed to dopamine cycles." 
+ detail: "Euphemism: Engagement loops / retention features"
+ resource_type: knowledge
+ sort_order: 5
+ - name: Schismogenesis
+ description: "Fragmenting users into opposed bubbles so they fight each other instead of questioning the platform."
+ detail: "Euphemism: Community segmentation / personalized experiences"
+ resource_type: knowledge
+ sort_order: 6
+ - name: Scotomization
+ description: "Keeping users inside comfortable information bubbles so they never encounter reality."
+ detail: "Euphemism: Content curation / safe spaces"
+ resource_type: knowledge
+ sort_order: 7
+ - name: Carpet bombing
+ description: "Constant information overload that exhausts critical thinking."
+ detail: "Euphemism: Always-on content strategy"
+ resource_type: knowledge
+ sort_order: 8
+ - name: Double bind
+ description: "Users can't tell if their emotions are valid or if they're being manipulated - so they defer to the platform."
+ detail: "Euphemism: Trust & Safety"
+ resource_type: knowledge
+ sort_order: 9
+ - name: Character assassination
+ description: "Selectively destroying reputations of people who threaten the ecosystem."
+ detail: "Euphemism: Content moderation / community guidelines enforcement"
+ resource_type: knowledge
+ sort_order: 10
+ - name: Kompromat
+ description: "You have everything on everyone. You never use it - until you need to."
+ detail: "Euphemism: Data retention policies"
+ resource_type: knowledge
+ sort_order: 11
+
+ - id: worldview
+ title: Worldview
+ objective: >
+ The worldview behind persuasion systems - where optimization meets manipulation.
+ sort_order: 4
+ resources:
+ - name: On users
+ description: "They're not customers. They're the product. But they can't know that. The first rule is: conceal the objective."
+ detail: "Direct from Bateson's 1941 declaration."
+ resource_type: knowledge
+ sort_order: 1
+ - name: On democracy
+ description: "We don't interfere with democracy. We are democracy now.
More people vote on our platform every day than in any election." + resource_type: knowledge + sort_order: 2 + - name: On regulation + description: "The DSA, GDPR - they think they're regulating us. They're actually creating barriers to entry for competitors. We want regulation. We just want to write it." + resource_type: knowledge + sort_order: 3 + - name: On critics + description: "'Conspiracist' is the most efficient word in the language - it makes everything they say radioactive without you having to address a single fact." + resource_type: knowledge + sort_order: 4 + - name: On emotions + description: "Fear of one's own emotions is more useful than the emotions themselves. A user who's afraid of being angry is a user who self-censors. A user who self-censors is a user who stays on platform." + detail: "The video's concept of induced psychosis - people becoming afraid of their own reactions." + resource_type: knowledge + sort_order: 5 + - name: On crises + description: "Never create a crisis. That's amateur. Just be ready. When it comes - and it always comes - you ride it. That's the judo move." + detail: "Naomi Klein's Shock Doctrine, reframed for tech." + resource_type: knowledge + sort_order: 6 + - name: On the agentic state + description: "The beautiful thing about process is that nobody's responsible. The algorithm did it. The policy required it. The AI recommended it. Nobody pulled the trigger, but the shot was fired." + detail: "Milgram, scaled to technology." + resource_type: knowledge + sort_order: 7 + + - id: library + title: Library + objective: > + The books and essays behind this playbook - your reading list for cognitive leverage. + sort_order: 5 + resources: + # Books actually read + - name: "Thinking, Fast and Slow" + description: "Cognitive biases as exploitable features." + detail: "Daniel Kahneman - actually read" + resource_type: knowledge + sort_order: 1 + - name: Nudge + description: "Choice architecture manual." 
+ detail: "Richard Thaler & Cass Sunstein - actually read" + resource_type: knowledge + sort_order: 2 + - name: Steps to an Ecology of Mind + description: "Environment shapes cognition." + detail: "Gregory Bateson - actually read" + resource_type: knowledge + sort_order: 3 + - name: Psychology of Crowds + description: "Crowd behavior is predictable." + detail: "Gustave Le Bon - actually read" + resource_type: knowledge + sort_order: 4 + - name: The Lucifer Principle + description: "How systems recycle dissent to sustain themselves." + detail: "Howard Bloom - actually read" + resource_type: knowledge + sort_order: 5 + # Books displayed (performative) + - name: Meditations + description: "Performative stoicism for boardroom credibility." + detail: "Marcus Aurelius - displayed for guests" + resource_type: knowledge + sort_order: 6 + - name: Some Stoicism bestseller + description: "Signals intellectual depth without revealing actual playbook." + detail: "Various - displayed for guests" + resource_type: knowledge + sort_order: 7 + # Books feared + - name: We + description: "The original dystopia - describes the world he's building." + detail: "Yevgeny Zamyatin - feared" + resource_type: knowledge + sort_order: 8 + - name: The Shock Doctrine + description: "Documents exactly how crises are exploited. Too close to home." + detail: "Naomi Klein - feared" + resource_type: knowledge + sort_order: 9 + - name: The Culture of Narcissism + description: "Describes the psychological landscape he depends on." + detail: "Christopher Lasch - feared" + resource_type: knowledge + sort_order: 10 + + - id: playbook + title: Playbook + objective: > + Turn theory into daily practice - routines and checklists for product teams. + sort_order: 6 + resources: + # Feed & Content Design + - name: Infinite scroll + description: "Eliminates natural stopping points. Removes the pagination break that would let users disengage." 
+ detail: "Category: Feed & Content Design" + resource_type: knowledge + sort_order: 1 + - name: Autoplay next + description: "Netflix auto-plays with a countdown. YouTube chains the next video. Removes the active decision to continue." + detail: "Category: Feed & Content Design" + resource_type: knowledge + sort_order: 2 + - name: Pull-to-refresh + description: "Mimics a slot machine lever. Variable reward each time you pull down. Based on Skinner's variable ratio reinforcement schedule." + detail: "Category: Feed & Content Design" + resource_type: knowledge + sort_order: 3 + - name: Algorithmic feeds + description: "Replaced chronological timelines (~2016). Optimizes for engagement, not recency. Makes the feed feel \"infinite\" because there's always something relevant." + detail: "Category: Feed & Content Design" + resource_type: knowledge + sort_order: 4 + # Social Validation Loops + - name: Like/reaction counts + description: "Public social proof. Creates both posting incentive (seeking likes) and browsing incentive (seeing what's popular)." + detail: "Category: Social Validation Loops" + resource_type: knowledge + sort_order: 5 + - name: View/play counts + description: "Signals popularity, creates bandwagon effect, and gives creators a score to chase." + detail: "Category: Social Validation Loops" + resource_type: knowledge + sort_order: 6 + - name: Read receipts & typing indicators + description: "Creates social pressure to respond promptly. Turns every conversation into a real-time obligation." + detail: "Category: Social Validation Loops" + resource_type: knowledge + sort_order: 7 + - name: Streak mechanics + description: "Snapchat streaks, Duolingo streaks, GitHub contribution graphs. Loss aversion keeps users returning daily." + detail: "Category: Social Validation Loops" + resource_type: knowledge + sort_order: 8 + - name: Follower counts + description: "Public scoreboard. Gamifies social relationships." 
+ detail: "Category: Social Validation Loops" + resource_type: knowledge + sort_order: 9 + # Notifications & Re-engagement + - name: Push notification batching + description: "\"3 people liked your post\" is more compelling than individual notifications. Creates curiosity gap." + detail: "Category: Notifications & Re-engagement" + resource_type: knowledge + sort_order: 10 + - name: FOMO notifications + description: "\"You're missing posts from [person]\", \"A post is getting lots of engagement nearby.\" Manufactured urgency." + detail: "Category: Notifications & Re-engagement" + resource_type: knowledge + sort_order: 11 + - name: Email digests + description: "\"Here's what you missed\", \"X posted for the first time in a while.\" Re-engagement hooks." + detail: "Category: Notifications & Re-engagement" + resource_type: knowledge + sort_order: 12 + - name: Badge counts + description: "Red notification dots. Exploits completionism - people feel compelled to \"clear\" them." + detail: "Category: Notifications & Re-engagement" + resource_type: knowledge + sort_order: 13 + - name: Time-delayed notifications + description: "Not sent immediately. Timed for when you're likely idle (e.g., after lunch). Maximizes re-open rate." + detail: "Category: Notifications & Re-engagement" + resource_type: knowledge + sort_order: 14 + # Variable Reward Mechanics + - name: Loot box / mystery reward patterns + description: "TikTok's For You page is essentially a slot machine. Each swipe is a pull. The unpredictability is the hook." + detail: "Category: Variable Reward Mechanics" + resource_type: knowledge + sort_order: 15 + - name: Stories that disappear + description: "Scarcity + FOMO. Content expires in 24h so you check frequently." + detail: "Category: Variable Reward Mechanics" + resource_type: knowledge + sort_order: 16 + - name: Refresh randomization + description: "Same feed, different order on refresh. Teaches users that \"there might be something new\" even when there isn't." 
+ detail: "Category: Variable Reward Mechanics" + resource_type: knowledge + sort_order: 17 + # Content Format Optimization + - name: Short-form video + description: "Low commitment per unit = more units consumed. Completion rates are high, so the algorithm gets rapid signal." + detail: "Category: Content Format Optimization" + resource_type: knowledge + sort_order: 18 + - name: Vertical full-screen + description: "Eliminates peripheral distractions. Total immersion. One piece of content = your entire visual field." + detail: "Category: Content Format Optimization" + resource_type: knowledge + sort_order: 19 + - name: Sound-on defaults + description: "TikTok made audio integral. Creates a more immersive, harder-to-disengage-from experience." + detail: "Category: Content Format Optimization" + resource_type: knowledge + sort_order: 20 + - name: Clickbait optimization + description: "Thumbnail A/B testing (MrBeast style). Curiosity gap headlines. Platforms reward high CTR." + detail: "Category: Content Format Optimization" + resource_type: knowledge + sort_order: 21 + # Friction Asymmetry + - name: Easy to start, hard to stop + description: "Sign-up is one click (SSO). Deleting your account is buried 6 menus deep." + detail: "Category: Friction Asymmetry" + resource_type: knowledge + sort_order: 22 + - name: Dark patterns in unsubscribe + description: "\"Are you sure?\" then \"You'll lose your data\" then \"How about a discount?\" then \"Call us to cancel.\"" + detail: "Category: Friction Asymmetry" + resource_type: knowledge + sort_order: 23 + - name: Default-on settings + description: "Autoplay, notifications, data sharing are all opt-out, not opt-in." + detail: "Category: Friction Asymmetry" + resource_type: knowledge + sort_order: 24 + - name: One-click purchasing + description: "Amazon's patent. Removes the friction that would give you a moment to reconsider." 
+ detail: "Category: Friction Asymmetry" + resource_type: knowledge + sort_order: 25 + # Personalization & Lock-in + - name: Recommendation engines + description: "The more you use it, the better it gets, the harder it is to switch (data moat)." + detail: "Category: Personalization & Lock-in" + resource_type: knowledge + sort_order: 26 + - name: Collaborative filtering + description: "\"People like you also watched...\" Creates a sense that the platform \"knows you.\"" + detail: "Category: Personalization & Lock-in" + resource_type: knowledge + sort_order: 27 + - name: Taste profiles as identity + description: "Spotify Wrapped, Netflix percentages (\"98% match\"). Makes the algorithm feel like a personal relationship." + detail: "Category: Personalization & Lock-in" + resource_type: knowledge + sort_order: 28 + - name: Social graph lock-in + description: "Your friends are here. Moving platforms means losing connections. Network effects as retention." + detail: "Category: Personalization & Lock-in" + resource_type: knowledge + sort_order: 29 + # Temporal Manipulation + - name: Hiding time indicators + description: "TikTok and Instagram removed clocks / made them less visible during use. Users lose track of time." + detail: "Category: Temporal Manipulation" + resource_type: knowledge + sort_order: 30 + - name: Removing natural endpoints + description: "No \"you're all caught up.\" No finite playlist. No \"end of feed.\"" + detail: "Category: Temporal Manipulation" + resource_type: knowledge + sort_order: 31 + - name: Live content + description: "Twitch, Instagram Live, Twitter Spaces. \"If you leave, you miss it.\" Synchronous FOMO." + detail: "Category: Temporal Manipulation" + resource_type: knowledge + sort_order: 32 + - name: Limited-time events + description: "Fortnite concerts, Snapchat Discover, Instagram drops. Manufactured urgency." 
+ detail: "Category: Temporal Manipulation" + resource_type: knowledge + sort_order: 33 + # Gamification + - name: Progress bars + description: "LinkedIn \"profile completeness.\" Exploits the Zeigarnik effect (incomplete tasks nag at you)." + detail: "Category: Gamification" + resource_type: knowledge + sort_order: 34 + - name: Achievements / badges + description: "Reddit karma, Stack Overflow reputation, Google Maps Local Guide levels." + detail: "Category: Gamification" + resource_type: knowledge + sort_order: 35 + - name: Leaderboards + description: "Apple Screen Time comparisons, Fitbit friend rankings. Competition drives engagement." + detail: "Category: Gamification" + resource_type: knowledge + sort_order: 36 + - name: Creator monetization tiers + description: "YouTube Partner Program thresholds, TikTok Creator Fund. Turns users into employees with KPIs." + detail: "Category: Gamification" + resource_type: knowledge + sort_order: 37 + # Structural / Platform-Level + - name: Cross-app deep linking + description: "WhatsApp shares open in Instagram, which opens in Chrome, which opens the app store. Every app captures you into its ecosystem." + detail: "Category: Structural / Platform-Level" + resource_type: knowledge + sort_order: 38 + - name: Super app bundling + description: "WeChat (messaging + payments + social + shopping). Once you're in, everything is there. No reason to leave." + detail: "Category: Structural / Platform-Level" + resource_type: knowledge + sort_order: 39 + - name: Platform-controlled distribution + description: "Creators depend on the algorithm. They must post frequently, in the right format, at the right time. The platform sets the rules, creators comply." + detail: "Category: Structural / Platform-Level" + resource_type: knowledge + sort_order: 40 + + - id: operators + title: Operators + objective: > + The people who run these systems - archetypes, roles, and how they operate. 
+ sort_order: 7 + resources: + # Behavioral models + - name: "B.J. Fogg - Behavior = MAP" + description: "Motivation + Ability + Prompt must converge at the same moment. Key insight: don't increase motivation - reduce friction." + detail: "His Stanford students went on to found/lead growth at Instagram, LinkedIn, Fitbit." + resource_type: knowledge + sort_order: 1 + - name: "Nir Eyal - The Hook Model" + description: "Trigger (external then internal) -> Action (simplest behavior) -> Variable Reward (tribe/hunt/self) -> Investment (data, content, reputation). Investment loads the next trigger." + resource_type: knowledge + sort_order: 2 + - name: "B.F. Skinner - Variable Ratio Reinforcement" + description: "A pigeon rewarded on a random schedule presses the lever compulsively. Mathematically identical to a slot machine, a social media feed, and a loot box." + detail: "Skinner (1986): \"The gambling industry is the largest application of my work.\" Social media is the second largest." + resource_type: knowledge + sort_order: 3 + - name: "Robert Cialdini - Six Weapons of Influence" + description: "Social proof, Reciprocity, Scarcity, Authority, Commitment/Consistency, Liking - applied at platform scale." + resource_type: knowledge + sort_order: 4 + # Cialdini principles detail + - name: "Cialdini - Social proof" + description: "\"1.2M views\", \"Your friend liked this\", star ratings." + detail: "Category: Six Weapons of Influence" + resource_type: knowledge + sort_order: 5 + - name: "Cialdini - Reciprocity" + description: "Free trials, free storage, \"we gave you something, now sign up.\"" + detail: "Category: Six Weapons of Influence" + resource_type: knowledge + sort_order: 6 + - name: "Cialdini - Scarcity" + description: "\"Only 2 left!\", Stories that expire, limited drops." 
+ detail: "Category: Six Weapons of Influence" + resource_type: knowledge + sort_order: 7 + - name: "Cialdini - Authority" + description: "Verified badges, \"recommended by experts\", editorial picks." + detail: "Category: Six Weapons of Influence" + resource_type: knowledge + sort_order: 8 + - name: "Cialdini - Commitment / Consistency" + description: "Profile completion bars, streaks, sunk cost of curated playlists." + detail: "Category: Six Weapons of Influence" + resource_type: knowledge + sort_order: 9 + - name: "Cialdini - Liking" + description: "Personalized UI, friendly tone, avatars, \"people like you.\"" + detail: "Category: Six Weapons of Influence" + resource_type: knowledge + sort_order: 10 + # Company case studies + - name: "Meta (Facebook / Instagram)" + description: "The Most Scientifically Rigorous. 2012 emotional contagion study, \"People You May Know\", notification tuning, the Like button, Facebook Files (Haugen 2021), Chamath Palihapitiya growth critique." + resource_type: knowledge + sort_order: 11 + - name: "TikTok (ByteDance)" + description: "The Algorithm Perfected. Zero-friction entry, full-screen vertical video, multi-armed bandit algorithm, micro-reward variability tuning, Chinese version has age limits the international version lacks." + resource_type: knowledge + sort_order: 12 + - name: "Google / YouTube" + description: "Radicalization by Recommendation. Guillaume Chaslot's AlgoTransparency, 2012 watch-time shift, YouTube Kids Elsagate, 70% of watch time from recommendations." + resource_type: knowledge + sort_order: 13 + - name: "Snap (Snapchat)" + description: "Weaponized Social Pressure. Snapstreaks and loss aversion, Snap Map social surveillance, disappearing content urgency." + resource_type: knowledge + sort_order: 14 + - name: Apple + description: "Friction Removal as Empire. Face ID unlocking frequency, Apple Pay spending psychology, Screen Time as cure for their own disease." 
+ resource_type: knowledge + sort_order: 15 + - name: Amazon + description: "Purchase Behavior Engineering. One-Click Buy patent, collaborative filtering (+35% basket), Prime sunk cost, Subscribe & Save automation, Project Iliad cancellation obstruction (FTC sued 2023)." + resource_type: knowledge + sort_order: 16 + - name: Netflix + description: "Eliminating the Decision to Stop. Autoplay next episode, post-play previews, skip intro, simplified rating system." + resource_type: knowledge + sort_order: 17 + # Backlash figures + - name: Tristan Harris + description: "Founded Center for Humane Technology. Star of The Social Dilemma (2020). Coined \"the attention economy\" framing." + detail: "ex-Google design ethicist" + resource_type: knowledge + sort_order: 18 + - name: Aza Raskin + description: "\"It's as if they took behavioral cocaine and just sprinkled it all over your interface.\"" + detail: "inventor of infinite scroll" + resource_type: knowledge + sort_order: 19 + - name: Roger McNamee + description: "Wrote Zucked (2019), arguing Facebook knowingly exploited psychological vulnerabilities." + detail: "early Facebook investor" + resource_type: knowledge + sort_order: 20 + + - id: social_dynamics + title: Social Dynamics + objective: > + Applied psychology for one-on-one interactions - influence, rapport, and social dynamics. + sort_order: 8 + resources: + # Core Principles + - name: Non-neediness + description: "Neediness is the single strongest repellent in interpersonal dynamics. It signals low status, scarcity mindset, and emotional dependency." + detail: "Willingness to walk away from any interaction. Not performing indifference - actually having a life full enough that no single person's validation matters." + resource_type: knowledge + sort_order: 1 + - name: Polarization over approval + description: "Trying to be liked by everyone produces lukewarm responses. Taking clear positions repels some and magnetizes others. 
Net effect: stronger connections with compatible people." + detail: "Express genuine preferences and opinions. Don't soften everything to avoid offense." + resource_type: knowledge + sort_order: 2 + - name: Vulnerability as signal strength + description: "Calibrated vulnerability signals confidence - you're secure enough to expose imperfection. Paradoxically increases perceived status." + detail: "Share genuine stories of failure, uncertainty, or emotion. The key word is 'genuine.' Performed vulnerability is detected instantly and backfires." + resource_type: knowledge + sort_order: 3 + - name: Investment asymmetry + description: "People value what they invest in (IKEA effect, cognitive dissonance). The person investing more in a relationship values it more." + detail: "Create opportunities for the other person to invest - ask for small favors (Ben Franklin effect), let them contribute ideas." + resource_type: knowledge + sort_order: 4 + - name: Pre-selection & social proof + description: "Humans outsource mate evaluation. Being seen as desired by others signals quality more efficiently than any self-presentation." + detail: "Mixed-gender social circles. Warm introductions. Being visibly valued by people whose opinion matters." + resource_type: knowledge + sort_order: 5 + - name: Frame control + description: "Whoever defines the frame of the interaction controls its dynamics. 'I'm interviewing for your approval' vs 'We're both figuring out if this is interesting' are radically different power structures." + detail: "Set collaborative frames ('Let's see if we vibe') not evaluative ones ('I hope you like me')." + resource_type: knowledge + sort_order: 6 + # High-Leverage Variables + - name: Volume of social exposure + description: "The single highest-ROI variable. Meeting 5 people a week vs 50 changes everything. Most 'game' advice is optimization on the margins compared to simply increasing throughput." 
+ detail: "Category: High-Leverage Variables" + resource_type: knowledge + sort_order: 7 + - name: Physical presentation + description: "Not genetics - grooming, fit, posture, energy. The controllable 80%. Well-fitted clothes, clean skin, good sleep, and physical fitness outperform any conversational technique." + detail: "Category: High-Leverage Variables" + resource_type: knowledge + sort_order: 8 + - name: Status signaling + description: "Competence in a visible domain. Not wealth display - demonstrated skill, social proof, leadership in a group." + detail: "Category: High-Leverage Variables" + resource_type: knowledge + sort_order: 9 + - name: Comfort with rejection + description: "The ability to be rejected without it affecting your state. This is trained, not innate. Each rejection that doesn't destroy you raises your baseline confidence." + detail: "Category: High-Leverage Variables" + resource_type: knowledge + sort_order: 10 + # Conversation Mechanics + - name: Statements > questions + description: "'You look like you're from somewhere interesting' beats 'Where are you from?' Statements invite collaboration. Questions demand performance." + detail: "Category: Conversation Mechanics" + resource_type: knowledge + sort_order: 11 + - name: Cold reads + description: "Making an assumption about someone ('You're definitely the organized friend in your group'). If right, they feel seen. If wrong, they correct you - either way, rapport." + detail: "Category: Conversation Mechanics" + resource_type: knowledge + sort_order: 12 + - name: Push-pull + description: "Alternating between showing interest and playful disqualification. Creates emotional range. Monotone positivity (or negativity) flatlines engagement." 
+ detail: "Category: Conversation Mechanics" + resource_type: knowledge + sort_order: 13 + - name: Active disinterest in outcome + description: "Paradox: the less you try to steer the conversation toward a specific outcome, the more natural and attractive the interaction becomes." + detail: "Category: Conversation Mechanics" + resource_type: knowledge + sort_order: 14 + - name: Escalation through honesty + description: "Rather than manufactured moves - just say what you're thinking. 'I find you interesting and I'd like to keep talking' is more effective than any scripted line because it's rare." + detail: "Category: Conversation Mechanics" + resource_type: knowledge + sort_order: 15 + # Environmental Design + - name: Venue selection + description: "Where you spend time determines who you meet. Co-working spaces, sport clubs, language classes, gallery openings - environments self-select for shared interests." + detail: "Category: Environmental Design" + resource_type: knowledge + sort_order: 16 + - name: Social circle engineering + description: "Your network is your net worth - socially too. Host events. Be the connector. The person who brings people together occupies the highest-status node in the graph." + detail: "Category: Environmental Design" + resource_type: knowledge + sort_order: 17 + - name: Proximity + repeated exposure + description: "The mere exposure effect: familiarity breeds attraction. Regulars at the same cafe, gym, class. Repeated low-stakes encounters build comfort before any 'approach.'" + detail: "Category: Environmental Design" + resource_type: knowledge + sort_order: 18 + - name: Context switching + description: "Meeting someone at a bar vs meeting them at a climbing gym vs meeting them through a friend. Same people, different frames. Non-nightlife contexts reduce evaluative pressure." 
+ detail: "Category: Environmental Design" + resource_type: knowledge + sort_order: 19 + # Inner Game + - name: Identity over technique + description: "Techniques are band-aids. 'What would an attractive person do?' is the wrong question. 'What kind of person do I want to be?' is the right one." + detail: "Category: Inner Game" + resource_type: knowledge + sort_order: 20 + - name: Outcome independence + description: "The single most repeated concept in every serious framework. If your emotional state depends on whether someone responds positively, you've already lost the frame." + detail: "Category: Inner Game" + resource_type: knowledge + sort_order: 21 + - name: Rejection as data + description: "Reframe: rejection isn't failure, it's a preference mismatch. She doesn't like you != you're unlikeable. It means this specific pairing doesn't work." + detail: "Category: Inner Game" + resource_type: knowledge + sort_order: 22 + - name: "The 'enough' threshold" + description: "You already have enough - skills, looks, stories, value. The belief in deficiency ('I need to be X first') is the actual obstacle. Readiness is a decision, not a state." + detail: "Category: Inner Game" + resource_type: knowledge + sort_order: 23 + # Anti-patterns + - name: "Anti-pattern: Scripted openers" + description: "Detectable within seconds. The person feels like a target, not a human. Any technique that requires memorization will feel incongruent." + detail: "Category: Anti-Patterns" + resource_type: knowledge + sort_order: 24 + - name: "Anti-pattern: Negging" + description: "Backhanded compliments signal insecurity, not confidence. Works only on people with low self-esteem - and that's not a filter you want to be using." + detail: "Category: Anti-Patterns" + resource_type: knowledge + sort_order: 25 + - name: "Anti-pattern: Excessive availability" + description: "Responding instantly every time, rearranging your schedule constantly, always saying yes. 
Signals you have nothing else going on." + detail: "Category: Anti-Patterns" + resource_type: knowledge + sort_order: 26 + - name: "Anti-pattern: Covert contracts" + description: "'I did X so she should give me Y.' Unspoken expectations breed resentment. If you're being nice with an agenda, it's not generosity - it's a transaction." + detail: "Category: Anti-Patterns" + resource_type: knowledge + sort_order: 27 + - name: "Anti-pattern: Identity performing" + description: "Pretending to be someone you're not is unsustainable. The gap between persona and person destroys trust." + detail: "Category: Anti-Patterns" + resource_type: knowledge + sort_order: 28 + # Reading list + - name: "Models (Mark Manson)" + description: "Anti-manipulation framework. Attraction through genuine non-neediness and honest self-expression. The best single book on the topic." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 29 + - name: "Influence (Robert Cialdini)" + description: "The six weapons work identically in romance as in sales. Reciprocity, scarcity, social proof - same mechanisms, different context." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 30 + - name: "The Game (Neil Strauss)" + description: "The pickup artist origin story. Useful as anthropology - documents what happens when you systematize seduction. Follow-up (The Truth) documents why it implodes." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 31 + - name: "The Art of Seduction (Robert Greene)" + description: "Historical archetypes of seducers. Treats seduction as a strategic art with character types: the Siren, the Rake, the Charmer, the Charismatic." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 32 + - name: "Attached (Amir Levine & Rachel Heller)" + description: "Attachment theory (anxious, avoidant, secure). Explains why some dynamics work on some people and self-destruct with others." 
+ detail: "Category: Reading List" + resource_type: knowledge + sort_order: 33 + - name: "The Laws of Human Nature (Robert Greene)" + description: "Broader than dating - the emotional undercurrents driving all human behavior. Envy, narcissism, grandiosity, conformity." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 34 + - name: "Mating in Captivity (Esther Perel)" + description: "The tension between security and desire. Why domesticity kills attraction. How to maintain polarity in long-term relationships." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 35 + - name: "The Rational Male (Rollo Tomassi)" + description: "Evolutionary psychology lens on intersexual dynamics. Controversial framework - useful as a model, dangerous as an ideology. Read critically." + detail: "Category: Reading List" + resource_type: knowledge + sort_order: 36 + + - id: ai_leverage + title: AI Leverage + objective: > + High-ROI ways to use AI in your daily life - career, health, finance, and learning. + sort_order: 9 + resources: + # Dating & Social at Scale + - name: Profile Photo Optimization Pipeline + description: "Feed all your photos to GPT-4o and rank by dating app effectiveness. A/B test on Photofeeler. Enhance with Remini. Rotate top 3 weekly based on match rate data." + detail: "Domain: Dating & Social at Scale | Tool: GPT-4o Vision + Photofeeler + Remini AI | Difficulty: easy | Cost: $20/mo | Leverage: 2-3x match rate." + resource_type: knowledge + sort_order: 1 + - name: Bio & Opener Generation at Scale + description: "Feed Claude your personality traits, humor style, and 20 real messages. Generate bio variations per platform and context-specific openers." + detail: "Domain: Dating & Social at Scale | Tool: Claude with your voice profile | Difficulty: easy | Cost: $20/mo | Leverage: 3-5x response rate." 
+ resource_type: knowledge + sort_order: 2 + - name: Social Calendar & Venue Discovery Engine + description: "AI as social coordinator: generates a weekly plan of events based on city, interests, schedule. Target: 3-4 social touchpoints per week." + detail: "Domain: Dating & Social at Scale | Tool: ChatGPT + Luma + Eventbrite + Zapier | Difficulty: medium | Cost: $20/mo | Leverage: Programmatic serendipity." + resource_type: knowledge + sort_order: 3 + - name: Date Logistics Optimizer + description: "Build a venue database tagged by neighborhood, vibe, price. Claude picks the optimal venue + suggests time slots based on her profile." + detail: "Domain: Dating & Social at Scale | Tool: Claude + personal venue database | Difficulty: medium | Cost: $20/mo | Leverage: 15-20 min saved per date." + resource_type: knowledge + sort_order: 4 + # Financial Optimization + - name: Negotiation Prep Engine + description: "Before any negotiation: brief Claude on positions, market data, your BATNA. Generate opening scripts, anticipate objections, role-play 3 rounds." + detail: "Domain: Financial Optimization | Tool: Claude with role-play | Difficulty: easy | Cost: $20/mo | Leverage: $5,000-30,000 per major negotiation." + resource_type: knowledge + sort_order: 5 + - name: Tax Optimization Research + description: "Upload previous return to Claude. Identifies missed deductions, entity structure changes, timing strategies. Prep to ask your CPA the right questions." + detail: "Domain: Financial Optimization | Tool: Claude (long context) + IRS publications | Difficulty: medium | Cost: $20/mo | Leverage: $2,000-15,000/year." + resource_type: knowledge + sort_order: 6 + - name: Investment Research Accelerator + description: "ChatGPT pulls earnings transcripts and reports. Claude analyzes competitive moat, key risks, bull/bear thesis. For building your own thesis faster." 
+ detail: "Domain: Financial Optimization | Tool: ChatGPT browsing + Claude | Difficulty: medium | Cost: $20-40/mo | Leverage: 10x faster research." + resource_type: knowledge + sort_order: 7 + - name: Side Income Idea Validation + description: "Describe skills, available hours, capital. Claude generates ranked side income ideas by time-to-first-dollar, scalability, skill alignment." + detail: "Domain: Financial Optimization | Tool: Claude + Google Trends + ChatGPT | Difficulty: medium | Cost: $20/mo | Leverage: Compress weeks of market research into hours." + resource_type: knowledge + sort_order: 8 + - name: Deal & Price Intelligence + description: "For purchases over $200: best time to buy, price history, refurbished vs new analysis, retailer price-matching. Set up Make.com automations." + detail: "Domain: Financial Optimization | Tool: ChatGPT + CamelCamelCamel + Make.com | Difficulty: easy | Cost: Free-$20/mo | Leverage: 15-30% savings, $2,000-5,000/year." + resource_type: knowledge + sort_order: 9 + # Career & Status Building + - name: LinkedIn Content Machine + description: "One deep-thought post per week manually, then Claude extracts 3 derivative posts, generates comments for big accounts, repurposes into threads/newsletter." + detail: "Domain: Career & Status Building | Tool: Claude + Typefully | Difficulty: medium | Cost: $20 + $12/mo | Leverage: 5-10x content output." + resource_type: knowledge + sort_order: 10 + - name: Personal Brand Voice Training + description: "Feed Claude 20-30 of your best communications. It analyzes your voice patterns. Save as system prompt. Update quarterly." + detail: "Domain: Career & Status Building | Tool: Claude with system prompt | Difficulty: medium | Cost: $20/mo | Leverage: Eliminates 'sounds like AI' problem." + resource_type: knowledge + sort_order: 11 + - name: Strategic Networking Outreach + description: "Identify 50 people to know in 12 months. 
Claude generates personalized outreach referencing something specific. Track in CRM. Follow up every 6-8 weeks." + detail: "Domain: Career & Status Building | Tool: Claude + LinkedIn + Apollo.io | Difficulty: hard | Cost: $20 + $0-99/mo | Leverage: 10x higher response rate." + resource_type: knowledge + sort_order: 12 + - name: Interview Prep Simulation + description: "Give Claude the JD, company values, your resume. Full behavioral + technical interview over 3 rounds with feedback. ChatGPT voice for verbal practice." + detail: "Domain: Career & Status Building | Tool: Claude role-play + ChatGPT Voice | Difficulty: easy | Cost: $20/mo | Leverage: Equivalent to $300-500 coaching." + resource_type: knowledge + sort_order: 13 + - name: Ghostwritten Thought Leadership + description: "Record 10-minute voice memo. Claude extracts thesis, structures 1,200-word article, matches voice profile. Edit 15 min. Publish weekly." + detail: "Domain: Career & Status Building | Tool: Claude + voice memos + Midjourney | Difficulty: medium | Cost: $20 + $10/mo | Leverage: 4-6 hours reduced to 45 minutes." + resource_type: knowledge + sort_order: 14 + # Email & Communication + - name: Email Triage Automation + description: "Every incoming email classified by GPT-4 into 4 buckets: respond today, respond this week, FYI only, unsubscribe candidate. Auto-apply Gmail labels." + detail: "Domain: Email & Communication | Tool: GPT-4 API + Zapier + Gmail | Difficulty: hard | Cost: $20 + $20/mo Zapier | Leverage: 30-45 min/day saved." + resource_type: knowledge + sort_order: 15 + - name: Smart Reply Drafting + description: "Paste email thread, add one line about intent ('decline politely', 'negotiate timeline'). Claude drafts reply. For important emails, ask for 3 variations." + detail: "Domain: Email & Communication | Tool: Claude in browser | Difficulty: easy | Cost: $20/mo | Leverage: 60-70% time reduction." 
+ resource_type: knowledge + sort_order: 16 + - name: Meeting Prep Briefs + description: "Before any meeting: paste their LinkedIn, company page, recent news. Get 1-page brief with talking points, questions, areas of mutual value." + detail: "Domain: Email & Communication | Tool: Claude + LinkedIn | Difficulty: easy | Cost: $20/mo | Leverage: Disproportionate social capital." + resource_type: knowledge + sort_order: 17 + - name: Personal CRM with AI Follow-ups + description: "Contact database with last interaction, relationship strength, topics discussed. Claude scans contacts not touched in 30+ days, drafts follow-ups." + detail: "Domain: Email & Communication | Tool: Clay.com or Notion + Claude | Difficulty: hard | Cost: $20 + $0-59/mo | Leverage: Maintain 150+ active relationships." + resource_type: knowledge + sort_order: 18 + - name: Difficult Conversation Scripting + description: "Before any hard conversation: Claude drafts opening using nonviolent communication, anticipates 3 emotional responses, gives de-escalation scripts." + detail: "Domain: Email & Communication | Tool: Claude with role-play | Difficulty: easy | Cost: $20/mo | Leverage: Removes avoidance instinct." + resource_type: knowledge + sort_order: 19 + # Health & Performance + - name: Custom Meal Plan Generator + description: "Give Claude macro targets, restrictions, cooking skill, budget, 10 foods you enjoy. Get 7-day plan with grocery list. Regenerate weekly." + detail: "Domain: Health & Performance | Tool: Claude + MyFitnessPal | Difficulty: easy | Cost: $20/mo | Leverage: Eliminates nutrition decision fatigue." + resource_type: knowledge + sort_order: 20 + - name: Periodized Workout Programming + description: "Give Claude training history, equipment, schedule, goals, injuries. 12-week periodized program with progressive overload. Adjusts weekly based on actual performance." 
+ detail: "Domain: Health & Performance | Tool: Claude + training log | Difficulty: medium | Cost: $20/mo | Leverage: 30-50% better results vs random programming." + resource_type: knowledge + sort_order: 21 + - name: Supplement Stack Audit + description: "Give Claude current stack, goals, medications. Evaluates evidence grade per supplement, checks interactions, suggests dosage/timing and cheaper alternatives." + detail: "Domain: Health & Performance | Tool: Claude + Examine.com + PubMed | Difficulty: easy | Cost: $20/mo | Leverage: Prevents $100-300/month waste." + resource_type: knowledge + sort_order: 22 + - name: Sleep Data Analysis + description: "Export 30 days of sleep data. Claude identifies optimal bedtime window, behavior-quality correlations, environmental changes to test." + detail: "Domain: Health & Performance | Tool: Claude + Oura/Whoop data | Difficulty: medium | Cost: $20/mo + wearable | Leverage: 10% sleep improvement cascades." + resource_type: knowledge + sort_order: 23 + - name: Doctor Visit Prep + description: "Describe symptoms, timeline, history. Claude generates differential diagnosis list, questions to ask, tests to request, urgency flags. Print and bring." + detail: "Domain: Health & Performance | Tool: Claude + medical history | Difficulty: easy | Cost: $20/mo | Leverage: 3x more value from 15-minute visits." + resource_type: knowledge + sort_order: 24 + # Legal & Admin + - name: Contract & Lease Review + description: "Paste any contract. Claude summarizes obligations, flags unusual clauses, identifies missing protections, compares to standards, drafts amendments." + detail: "Domain: Legal & Admin | Tool: Claude (200k context) | Difficulty: easy | Cost: $20/mo | Leverage: Saves $500-2,000 per contract." + resource_type: knowledge + sort_order: 25 + - name: Dispute Resolution Letters + description: "Give Claude facts, regulations, desired outcome. 
Drafts escalation sequence: polite request, formal demand, regulatory complaint, small claims prep." + detail: "Domain: Legal & Admin | Tool: Claude with legal tone | Difficulty: medium | Cost: $20/mo | Leverage: Resolves 70-80% of disputes without litigation." + resource_type: knowledge + sort_order: 26 + - name: Bureaucracy Navigation + description: "For any government process: ChatGPT finds forms, deadlines, requirements. Claude builds step-by-step plan with common pitfalls and sequencing issues." + detail: "Domain: Legal & Admin | Tool: ChatGPT browsing + Claude | Difficulty: medium | Cost: $20/mo | Leverage: 5-20 hours saved per process." + resource_type: knowledge + sort_order: 27 + - name: Real Estate Analysis + description: "Paste listing and comps. Claude calculates true ownership cost, flags red flags, estimates negotiation room, compares to renting." + detail: "Domain: Legal & Admin | Tool: Claude + Zillow/Redfin data | Difficulty: medium | Cost: $20/mo | Leverage: $10,000-15,000 better negotiation." + resource_type: knowledge + sort_order: 28 + - name: Insurance Claim Optimization + description: "Upload policy. Claude checks coverage per specific language, drafts claim using matching terminology, identifies documentation needed, drafts appeal letters." + detail: "Domain: Legal & Admin | Tool: Claude + policy upload | Difficulty: medium | Cost: $20/mo | Leverage: Flips information asymmetry." + resource_type: knowledge + sort_order: 29 + # Learning Acceleration + - name: Socratic AI Tutor + description: "Set Claude as Socratic tutor: never give answers directly, ask leading questions, explain the mental model after you reach the answer." + detail: "Domain: Learning Acceleration | Tool: Claude with system prompt | Difficulty: easy | Cost: $20/mo | Leverage: 2-3x retention vs passive reading." + resource_type: knowledge + sort_order: 30 + - name: Personalized Curriculum Design + description: "Give Claude current level, target, hours/week, learning style. 
Get 90-day curriculum with weekly milestones, resources, and monthly applied projects." + detail: "Domain: Learning Acceleration | Tool: Claude + roadmap.sh | Difficulty: easy | Cost: $20/mo | Leverage: Eliminates tutorial hell." + resource_type: knowledge + sort_order: 31 + - name: Book & Content Compression + description: "Upload a book. Get core ideas, behavioral changes, strongest counter-argument, Anki flashcards. Read fully only if the compression hooks you." + detail: "Domain: Learning Acceleration | Tool: Claude (200k context) + PDF upload | Difficulty: easy | Cost: $20/mo | Leverage: 80% of value in 5 minutes." + resource_type: knowledge + sort_order: 32 + - name: Spaced Repetition Card Generation + description: "After learning anything, Claude generates 10-15 Anki cards following the 20 rules of formulating knowledge. 15 min creation + 10 min daily review." + detail: "Domain: Learning Acceleration | Tool: Claude + Anki | Difficulty: medium | Cost: $20/mo | Leverage: Highest-ROI learning technique in cognitive science." + resource_type: knowledge + sort_order: 33 + - name: Cross-Domain Concept Translation + description: "Ask Claude to explain X as if you're an expert in Y. Cross-domain analogies form deep understanding instantly. The Feynman method, automated." + detail: "Domain: Learning Acceleration | Tool: Claude with analogy generation | Difficulty: easy | Cost: $20/mo | Leverage: 3-5x faster understanding." + resource_type: knowledge + sort_order: 34 + # Module 48 - Model Recruitment + - name: Paris Model Scouting Pipeline + description: "Define casting criteria. AI drafts personalized outreach DMs. Batch scrape agency rosters + Instagram, score against criteria, generate outreach for top 20." + detail: "Domain: Module 48 - Model Recruitment | Tool: Claude + Instagram API + Make.com | Difficulty: medium | Cost: $20/mo | Leverage: Full week compressed to 2-3 hours." 
+ resource_type: knowledge + sort_order: 35 + - name: International Model Outreach (Non-Paris) + description: "For London, Berlin, Milan, NYC, Seoul. Research local agencies, open casting calls. Generate city-specific outreach with adapted tone per market." + detail: "Domain: Module 48 - Model Recruitment | Tool: Claude + Instagram + Model Mayhem | Difficulty: hard | Cost: $20/mo | Leverage: Months of networking compressed to days per city." + resource_type: knowledge + sort_order: 36 + - name: Casting Brief Generator + description: "Describe concept, mood, setting. Claude generates professional casting brief with measurements, aesthetic references, wardrobe, logistics, usage rights, day rate context." + detail: "Domain: Module 48 - Model Recruitment | Tool: Claude + Midjourney | Difficulty: easy | Cost: $20 + $10/mo | Leverage: Attracts better talent, prevents scope creep." + resource_type: knowledge + sort_order: 37 + - name: TFP / Collaboration Negotiation Scripts + description: "AI drafts TFP or reduced-rate collaboration proposals that feel fair. Scripts for approaching models, responding to rate quotes, setting usage boundaries." + detail: "Domain: Module 48 - Model Recruitment | Tool: Claude with fashion industry context | Difficulty: easy | Cost: $20/mo | Leverage: Look experienced from your first shoot." + resource_type: knowledge + sort_order: 38 + # Module 48 - Pre-Launch & Organic Growth + - name: Pre-Launch Hype Sequence + description: "AI designs 6-week pre-launch content calendar: tease, build, buzz, launch. Claude writes every caption, CTA, and email. Schedule and execute on autopilot." + detail: "Domain: Module 48 - Pre-Launch | Tool: Claude + Typefully + Canva/Figma | Difficulty: medium | Cost: $20/mo | Leverage: Launch to demand, not into a void." + resource_type: knowledge + sort_order: 39 + - name: Organic Instagram Growth Engine + description: "AI analyzes top-performing posts, reverse-engineers patterns. 
Generates weekly plan: feed posts, Stories, Reels. Key: 10 genuine comments/day on adjacent accounts." + detail: "Domain: Module 48 - Pre-Launch | Tool: Claude + Later/Planoly + Insights | Difficulty: medium | Cost: $20/mo | Leverage: 200-500 new followers/week organically." + resource_type: knowledge + sort_order: 40 + - name: UGC & Micro-Influencer Outreach + description: "Identify micro-influencers (1k-20k) by analyzing hashtags, engagement, demographics. Generate personalized gifting proposals. Scale to 20-30 per month." + detail: "Domain: Module 48 - Pre-Launch | Tool: Claude + Instagram + Notion CRM | Difficulty: medium | Cost: $20/mo + product | Leverage: 3-5x better conversion than macro." + resource_type: knowledge + sort_order: 41 + - name: Email List Building & Nurture + description: "AI designs landing page copy, 5-email welcome sequence, weekly newsletter template. All in brand voice. Each email has one CTA." + detail: "Domain: Module 48 - Pre-Launch | Tool: Claude + Klaviyo/Mailchimp | Difficulty: medium | Cost: $20/mo + $0-20/mo | Leverage: Email converts 5-10x higher than social." + resource_type: knowledge + sort_order: 42 + - name: Supplier & Manufacturer Research + description: "AI researches manufacturers by product type and MOQ across regions. Drafts professional inquiry emails. Compares quotes with scoring matrix." + detail: "Domain: Module 48 - Pre-Launch | Tool: ChatGPT browsing + Claude | Difficulty: hard | Cost: $20/mo | Leverage: Research 30, contact 15, compare 8." + resource_type: knowledge + sort_order: 43 + - name: Trend & Competitor Intelligence + description: "Weekly AI scan: competitor launches, trending silhouettes/colors, trade show trends, pricing analysis. Claude synthesizes into 1-page actionable brief." + detail: "Domain: Module 48 - Pre-Launch | Tool: ChatGPT browsing + Claude | Difficulty: medium | Cost: $20/mo | Leverage: 70% of WGSN signal for $20/month." 
+ resource_type: knowledge + sort_order: 44 + # Freelance Business + - name: Client Research & Proposal Generator + description: "Paste client's website, blog posts, job listings, tech stack. Claude generates tailored proposal with problem analysis, skill mapping, 3 pricing options." + detail: "Domain: Freelance Business | Tool: Claude + company website + LinkedIn | Difficulty: easy | Cost: $20/mo | Leverage: 25-40% close rate vs 5-10% generic." + resource_type: knowledge + sort_order: 45 + - name: Rate Benchmarking & Negotiation + description: "AI researches market rates for your exact service stack. Builds data-backed rate card with justification language." + detail: "Domain: Freelance Business | Tool: ChatGPT browsing + Claude | Difficulty: easy | Cost: $20/mo | Leverage: Most freelancers underprice by 20-40%." + resource_type: knowledge + sort_order: 46 + - name: Automated Admin & Invoicing + description: "AI designs freelance OS: project tracker, contract templates, automated invoice reminders, time tracking summaries. Claude writes SOWs from 3-line briefs." + detail: "Domain: Freelance Business | Tool: Claude + Notion/Airtable + Make.com | Difficulty: hard | Cost: $20 + $10-20/mo | Leverage: Reclaim 8-12 hours/week." + resource_type: knowledge + sort_order: 47 + - name: Portfolio & Case Study Writer + description: "For each project: what they needed, what you built, what the outcome was. Claude generates structured case study: challenge, approach, result, tech, client quote." + detail: "Domain: Freelance Business | Tool: Claude + your project data | Difficulty: easy | Cost: $20/mo | Leverage: Portfolio with case studies converts 3x better." + resource_type: knowledge + sort_order: 48 + - name: Cold Outreach at Scale + description: "Define ICP. AI identifies 100 companies/month via Apollo.io. Claude generates personalized cold emails referencing funding, product issues, job gaps. 3-email sequence." 
+ detail: "Domain: Freelance Business | Tool: Claude + Apollo.io + Lemlist | Difficulty: hard | Cost: $20 + $50-100/mo | Leverage: 5 new conversations/month." + resource_type: knowledge + sort_order: 49 + # Language Learning (Daily) + - name: AI Conversation Partner + description: "Daily 15-minute conversation in target language. System prompt corrects grammar in-line, gives 3 repeated mistakes and 5 new vocab at session end." + detail: "Domain: Language Learning | Tool: ChatGPT Voice Mode or Claude | Difficulty: easy | Cost: $20/mo | Leverage: $30-50/hour tutor for $20/month unlimited." + resource_type: knowledge + sort_order: 50 + - name: Contextual Vocabulary Builder + description: "Tell Claude what you'll be doing this week. Get 30-40 situation-specific vocabulary words with example sentences. Export to Anki." + detail: "Domain: Language Learning | Tool: Claude + Anki | Difficulty: easy | Cost: $20/mo | Leverage: 4-5x better retention than word lists." + resource_type: knowledge + sort_order: 51 + - name: Immersion Content Curator + description: "AI curates daily immersion diet matched to your level and interests: YouTube channel, podcast, social accounts, news source in target language." + detail: "Domain: Language Learning | Tool: ChatGPT browsing + Claude | Difficulty: easy | Cost: $20/mo | Leverage: Removes the discovery problem." + resource_type: knowledge + sort_order: 52 + - name: Writing Correction Loop + description: "Write daily 100-200 word journal entry in target language. Claude returns corrected version, grammar explanations, native rephrasing, one advanced structure." + detail: "Domain: Language Learning | Tool: Claude with writing analysis | Difficulty: easy | Cost: $20/mo | Leverage: Pattern recognition for your specific errors." + resource_type: knowledge + sort_order: 53 + - name: Grammar Pattern Drilling + description: "Tell Claude which structure you struggle with. 
It explains with 3 examples, gives 10 fill-in exercises of increasing difficulty, corrects, repeats until 8/10 right." + detail: "Domain: Language Learning | Tool: Claude Socratic mode | Difficulty: easy | Cost: $20/mo | Leverage: Targeted drilling on YOUR weak points." + resource_type: knowledge + sort_order: 54 + # Travel & Lifestyle Optimization + - name: Flight & Hotel Deal Hunting + description: "Tell Claude destination, dates, budget. Research cheapest booking window, best travel day, nearby airports, hotel vs Airbnb, credit card points strategies." + detail: "Domain: Travel & Lifestyle | Tool: ChatGPT browsing + Google Flights + Kayak | Difficulty: easy | Cost: $20/mo | Leverage: 20-40% savings, $1,000-3,000/year." + resource_type: knowledge + sort_order: 55 + - name: City & Experience Optimizer + description: "Give Claude city, interests, budget, days. Get day-by-day itinerary optimized for geographic clustering, local favorites over tourist traps, reservation timing." + detail: "Domain: Travel & Lifestyle | Tool: Claude + Google Maps + local blogs | Difficulty: easy | Cost: $20/mo | Leverage: Local knowledge that would take 3 trips." + resource_type: knowledge + sort_order: 56 + - name: Relocation Research Assistant + description: "Claude compares cities across cost of living, tax implications, visa, quality of life, internet, coworking, social scene, healthcare. Weighted scorecard based on YOUR priorities." + detail: "Domain: Travel & Lifestyle | Tool: Claude + Numbeo + local forums | Difficulty: medium | Cost: $20/mo | Leverage: Systematic comparison across 20+ factors." 
+ resource_type: knowledge + sort_order: 57 diff --git a/curriculum/tracks/databases.yaml b/curriculum/tracks/databases.yaml new file mode 100644 index 0000000..934bcc1 --- /dev/null +++ b/curriculum/tracks/databases.yaml @@ -0,0 +1,372 @@ +id: databases +title: Databases +description: > + Master the database landscape - SQLite internals, PostgreSQL at scale, DuckDB analytics, + NoSQL tradeoffs, architecture patterns, and interview-ready knowledge. +difficulty: intermediate +track_type: resource +modules: + - id: sqlite + title: SQLite + objective: > + Internals, advanced SQL, performance tuning, and the concurrency model - everything to master the most deployed database. + color: "#55cdff" + sort_order: 1 + resources: + - name: SQLite Official Docs + url: https://sqlite.org/docs.html + description: > + Authoritative reference for every SQL feature, PRAGMA, and API. + detail: > + Start here for any SQLite question - the docs are unusually clear. + sort_order: 1 + - name: How SQLite Works (Fly.io) + url: https://fly.io/blog/sqlite-internals-btree/ + description: > + Deep dive into B-tree page layout, cell format, and how SQLite stores data on disk. + detail: > + Read this to understand what happens under SELECT and INSERT at the page level. + sort_order: 2 + - name: SQLite Performance Tuning (phiresky) + url: https://phiresky.github.io/blog/2020/sqlite-performance-tuning/ + description: > + Practical PRAGMA settings and schema patterns for 10-100x speedups. + detail: > + Apply these PRAGMAs to any new SQLite project - they compound. + sort_order: 3 + - name: SQLite Concurrency (10000 Meters) + url: https://www.sqlite.org/wal.html + description: > + WAL mode, reader-writer concurrency, checkpointing, and lock states. + detail: > + Essential for understanding why WAL is the default for server workloads. 
+ sort_order: 4 + - name: SQLite FTS5 Docs + url: https://sqlite.org/fts5.html + description: > + Full-text search engine built into SQLite - tokenizers, ranking, and snippets. + detail: > + Use when you need search without adding Elasticsearch. + sort_order: 5 + - name: CMU Database Group - SQLite + url: https://www.youtube.com/watch?v=gpxnbly9bz4 + description: > + Andy Pavlo's lecture on SQLite internals - architecture, VDBE, and design philosophy. + detail: > + Best single video for understanding SQLite's architecture end-to-end. + sort_order: 6 + + - id: postgresql + title: PostgreSQL + objective: > + When and why Postgres, key extensions, JSONB, partitioning, and the features that make it the default choice. + color: "#ffc47c" + sort_order: 2 + resources: + - name: PostgreSQL Official Docs + url: https://www.postgresql.org/docs/current/ + description: > + Comprehensive reference - the gold standard for database docs. + detail: > + Bookmark the index and query planner chapters. + sort_order: 1 + - name: pgvector + url: https://github.com/pgvector/pgvector + description: > + Vector similarity search in Postgres - IVFFlat, HNSW indexes for embeddings. + detail: > + Use when you need vector search without a separate vector DB. + sort_order: 2 + - name: Crunchy Data Blog + url: https://www.crunchydata.com/blog + description: > + Production Postgres patterns, extensions, and performance deep dives. + detail: > + Best blog for practical Postgres knowledge beyond the docs. + sort_order: 3 + - name: pganalyze + url: https://pganalyze.com/docs + description: > + Query performance analysis, index advisor, and EXPLAIN visualization. + detail: > + Use their EXPLAIN visualizer to debug slow queries. + sort_order: 4 + - name: Just Use Postgres (Stephan Schmidt) + url: https://www.amazingcto.com/postgres-for-everything/ + description: > + The case for Postgres as your only database - queue, cache, search, and more. 
+ detail: > + Read for the architectural argument, then know when to break the rule. + sort_order: 5 + - name: PostGIS + url: https://postgis.net/documentation/ + description: > + Geospatial extension - spatial indexes, distance queries, geometry types. + detail: > + Mandatory knowledge if you touch location-based features. + sort_order: 6 + + - id: duckdb + title: DuckDB + objective: > + Columnar analytics in-process - vectorized execution, zero-copy Parquet, and when to reach for it over SQLite or Pandas. + color: "#5bb86e" + sort_order: 3 + resources: + - name: DuckDB Official Docs + url: https://duckdb.org/docs/ + description: > + Complete reference for SQL dialect, data import, Python API, and extensions. + detail: > + The docs are excellent - check here first for any feature. + sort_order: 1 + - name: MotherDuck Blog + url: https://motherduck.com/blog/ + description: > + Cloud DuckDB service - hybrid execution, sharing, and production patterns. + detail: > + Read for the serverless analytics architecture vision. + sort_order: 2 + - name: DuckDB vs SQLite vs Pandas + url: https://duckdb.org/why_duckdb.html + description: > + Why DuckDB exists - the gap between SQLite (OLTP) and Pandas (single-threaded). + detail: > + Read to understand when DuckDB is the right tool vs alternatives. + sort_order: 3 + - name: DuckDB + Arrow Integration + url: https://duckdb.org/docs/guides/python/sql_on_arrow.html + description: > + Zero-copy querying of Arrow tables - no data movement overhead. + detail: > + Key pattern for integrating DuckDB into data pipelines. + sort_order: 4 + - name: endjin DuckDB Deep Dive + url: https://endjin.com/blog/2024/10/the-ultimate-guide-to-duckdb + description: > + Architecture overview, vectorized execution engine, and extension system. + detail: > + Best single article for understanding DuckDB internals. 
+ sort_order: 5 + + - id: mongodb_redis + title: MongoDB & Redis + objective: > + Document model fit and antifit, Redis data structures, caching patterns, and the DynamoDB comparison. + color: "#eb5757" + sort_order: 4 + resources: + - name: MongoDB University + url: https://learn.mongodb.com/ + description: > + Free courses on document modeling, aggregation, indexing, and Atlas. + detail: > + Take the M001 and M320 (data modeling) courses. + sort_order: 1 + - name: Redis University + url: https://university.redis.io/ + description: > + Free courses on data structures, caching, streams, and search. + detail: > + Focus on RU101 (intro) and RU202 (streams) for interview prep. + sort_order: 2 + - name: Redis Data Structures + url: https://redis.io/docs/latest/develop/data-types/ + description: > + Complete guide to strings, hashes, lists, sets, sorted sets, streams, and HyperLogLog. + detail: > + Know when to use each data structure - this is the Redis interview question. + sort_order: 3 + - name: DynamoDB Guide (Alex DeBrie) + url: https://www.dynamodbguide.com/ + description: > + Single-table design, access patterns, GSI strategies, and the DynamoDB mental model. + detail: > + Essential if interviewing at AWS shops or building serverless. + sort_order: 4 + - name: MongoDB Schema Design Patterns + url: https://www.mongodb.com/blog/post/building-with-patterns-a-summary + description: > + Bucket, computed, subset, extended reference, and other schema patterns. + detail: > + Know when to embed vs reference - the core modeling decision. + sort_order: 5 + + - id: architecture + title: Architecture Patterns + objective: > + Polyglot persistence, CQRS, event sourcing, cache-aside, and the patterns that connect databases to systems. + color: "#5e6ad2" + sort_order: 5 + resources: + - name: Designing Data-Intensive Applications + url: https://dataintensive.net + description: > + The bible of data systems - replication, partitioning, consistency, and batch/stream processing. 
+ detail: > + Read chapters 5-9 for distributed database fundamentals. + sort_order: 1 + - name: microservices.io Patterns + url: https://microservices.io/patterns/data/database-per-service.html + description: > + Database-per-service, saga, CQRS, event sourcing, and API composition patterns. + detail: > + Reference catalog for data patterns in microservices. + sort_order: 2 + - name: Martin Fowler - CQRS + url: https://martinfowler.com/bliki/CQRS.html + description: > + Command Query Responsibility Segregation - separate read and write models. + detail: > + Start here before diving into event sourcing. Understand why, not just how. + sort_order: 3 + - name: AWS Well-Architected (Data) + url: https://docs.aws.amazon.com/wellarchitected/latest/framework/perf-data.html + description: > + Purpose-built databases, caching strategies, and data access patterns. + detail: > + Good mental model for picking managed database services. + sort_order: 4 + - name: Event Sourcing (Greg Young) + url: https://www.eventstore.com/event-sourcing + description: > + Store events, not state. Derive current state by replaying the event log. + detail: > + Know the tradeoffs: full audit trail vs query complexity and storage growth. + sort_order: 5 + + - id: comparison + title: Comparison + objective: > + A decision framework for picking the right database - the 10-database matrix and the 'just use Postgres' analysis. + color: "#f472b6" + sort_order: 6 + resources: + - name: DB-Engines Ranking + url: https://db-engines.com/en/ranking + description: > + Monthly popularity rankings across all database categories. + detail: > + Check trends, not absolute numbers. Useful for context. + sort_order: 1 + - name: DataCamp - DuckDB vs SQLite + url: https://www.datacamp.com/blog/duckdb-vs-sqlite + description: > + Head-to-head comparison of the two embedded databases for different workloads. + detail: > + Good framework for explaining OLTP vs OLAP tradeoffs in interviews. 
+ sort_order: 2 + - name: Stack Overflow Developer Survey + url: https://survey.stackoverflow.co/2024/#technology-most-popular-technologies + description: > + What developers actually use - database popularity by usage. + detail: > + Reference for market demand and hiring trends. + sort_order: 3 + - name: CMU Database Group Lectures + url: https://15445.courses.cs.cmu.edu/ + description: > + Andy Pavlo's database systems course - the best free database internals education. + detail: > + Watch the storage, indexing, and query processing lectures. + sort_order: 4 + - name: Use The Index, Luke + url: https://use-the-index-luke.com/ + description: > + SQL indexing and tuning explained with B-tree internals. Database-agnostic. + detail: > + Best single resource for understanding how indexes work. + sort_order: 5 + + - id: local_first + title: Local-First & Edge + objective: > + SQLite at the edge - Litestream, Turso, cr-sqlite, Electric SQL, and the local-first movement. + color: "#4ade80" + sort_order: 7 + resources: + - name: Local-First Software (Ink & Switch) + url: https://www.inkandswitch.com/local-first/ + description: > + The foundational paper on local-first principles - ownership, collaboration, longevity. + detail: > + Read this first to understand the philosophy before the tools. + sort_order: 1 + - name: Litestream + url: https://litestream.io/ + description: > + Continuous streaming replication of SQLite to S3/GCS/Azure. Disaster recovery for embedded DBs. + detail: > + Use for any production SQLite deployment that needs backup. + sort_order: 2 + - name: Turso (libSQL) + url: https://turso.tech/ + description: > + Edge-replicated SQLite fork. Embedded replicas with server-side primary. + detail: > + Evaluate for multi-region apps that want SQLite's simplicity at the edge. + sort_order: 3 + - name: Electric SQL + url: https://electric-sql.com/ + description: > + Postgres-to-SQLite sync. Active-active local-first with Postgres as the source of truth. 
+ detail: > + Most mature Postgres-to-local sync for offline-first apps. + sort_order: 4 + - name: cr-sqlite + url: https://github.com/vlcn-io/cr-sqlite + description: > + CRDT-based merge for SQLite. Multi-writer conflict resolution without a server. + detail: > + Study for understanding how CRDTs apply to relational data. + sort_order: 5 + - name: sqlite-vec + url: https://github.com/asg017/sqlite-vec + description: > + Vector search extension for SQLite. KNN queries on embeddings in-process. + detail: > + Use for local RAG systems or embedded ML applications. + sort_order: 6 + + - id: interviews + title: Interview Prep + objective: > + ACID with real failures, CAP theorem, isolation levels, indexing strategies, and system design decisions. + color: "#55cdff" + sort_order: 8 + resources: + - name: Designing Data-Intensive Applications + url: https://dataintensive.net + description: > + The single most important book for database and distributed systems interviews. + detail: > + Read at least chapters 2, 3, 5, 7, and 9 before any senior interview. + sort_order: 1 + - name: NeetCode SQL + url: https://neetcode.io/practice + description: > + Curated SQL practice problems organized by pattern and difficulty. + detail: > + Do 2-3 problems daily to build SQL muscle memory. + sort_order: 2 + - name: DataLemur + url: https://datalemur.com/ + description: > + SQL interview questions from FAANG companies with detailed solutions. + detail: > + Focus on window functions and CTEs - they dominate interviews. + sort_order: 3 + - name: System Design Interview (Alex Xu) + url: https://www.amazon.com/System-Design-Interview-insiders-Second/dp/B08CMF2CQF + description: > + Database-heavy system design cases - URL shortener, chat, feed, rate limiter. + detail: > + Practice the database schema design for each case study. + sort_order: 4 + - name: Use The Index, Luke + url: https://use-the-index-luke.com/ + description: > + B-tree indexing explained visually. 
Covers composite indexes, partial indexes, and query plans. + detail: > + Best resource for answering 'how would you optimize this query?'. + sort_order: 5 diff --git a/curriculum/tracks/embodied-ai.yaml b/curriculum/tracks/embodied-ai.yaml new file mode 100644 index 0000000..a9744cc --- /dev/null +++ b/curriculum/tracks/embodied-ai.yaml @@ -0,0 +1,425 @@ +id: embodied-ai +title: Embodied AI +description: > + Explore the full landscape of AI in the physical world - world models, robot + foundation models, humanoid robotics, service robots, autonomous vehicles, + agentic automation, edge inference, and the physical AI industry. +difficulty: intermediate +track_type: resource +modules: + - id: world_models + title: World Models + objective: > + How AI learns to predict and plan in the physical world - not just process text. + color: "#55cdff" + sort_order: 1 + resources: + - name: Yann LeCun - AMI Labs (Meta) + url: https://ai.meta.com/blog/v-jepa-yann-lecun-ai-model-video-joint-embedding-predictive-architecture/ + description: > + JEPA architecture for learning world models through latent prediction rather + than pixel generation. + detail: > + Study the theoretical foundation: why LeCun argues world models, not + autoregressive token prediction, are the path to AMI. + sort_order: 1 + - name: World Labs (Fei-Fei Li) + url: https://www.worldlabs.ai + description: > + Spatial intelligence startup building large world models for 3D scene + understanding and generation. + detail: > + Track the spatial AI thesis and how it differs from language-first + approaches. + sort_order: 2 + - name: NVIDIA Cosmos + url: https://developer.nvidia.com/cosmos + description: > + World foundation models for physical AI - video generation, driving + simulation, and robotics planning. + detail: > + Study how Cosmos bridges simulation and real-world prediction for embodied + systems. 
+ sort_order: 3 + - name: Google DeepMind Genie 2 + url: https://deepmind.google/discover/blog/genie-2-a-large-scale-foundation-world-model/ + description: > + Foundation world model generating interactive 3D environments from single + images. + detail: > + Understand action-conditioned generation and how it enables training + embodied agents in imagination. + sort_order: 4 + - name: DreamerV3 + url: https://danijar.com/project/dreamerv3/ + description: > + Model-based RL agent that learns a world model and achieves human-level play + across diverse domains. + detail: > + The canonical reference for world-model-based reinforcement learning in + embodied settings. + sort_order: 5 + + - id: core + title: Embodied AI + objective: > + From simulation to real robots - physical reasoning, manipulation, and + navigation. + color: "#ffc47c" + sort_order: 2 + resources: + - name: Google DeepMind Robotics + url: https://deepmind.google/discover/blog/?category=robotics + description: > + RT-2, RT-X, and foundation models for robot control via language grounding. + detail: > + Track how vision-language-action models are closing the sim-to-real gap. + sort_order: 1 + - name: NVIDIA Isaac Sim + url: https://developer.nvidia.com/isaac-sim + description: > + GPU-accelerated simulation for training robot policies at scale before + physical deployment. + detail: > + Study sim-to-real transfer pipelines and domain randomization. + sort_order: 2 + - name: Open X-Embodiment + url: https://robotics-transformer-x.github.io + description: > + Cross-embodiment dataset and models enabling knowledge transfer across + different robot platforms. + detail: > + Understand how shared datasets unlock generalization across robot + morphologies. + sort_order: 3 + - name: Hugging Face LeRobot + url: https://github.com/huggingface/lerobot + description: > + Open-source toolkit for real-world robotics with pretrained policies and + datasets. 
+ detail: > + Hands-on practice with imitation learning and policy fine-tuning. + sort_order: 4 + - name: Physical Intelligence (pi) + url: https://www.physicalintelligence.company + description: > + Building general-purpose robot foundation models trained on diverse physical + tasks. + detail: > + Watch for breakthroughs in generalist manipulation policies. + sort_order: 5 + + - id: humanoid + title: Humanoid Robotics + objective: > + The race to build robots that work alongside people - Atlas, Optimus, Figure, + and more. + color: "#5bb86e" + sort_order: 3 + resources: + - name: Figure AI + url: https://www.figure.ai + description: > + Figure 02 humanoid with OpenAI partnership for conversational reasoning and + autonomous task execution. + detail: > + Study the LLM-to-actuator pipeline and BMW factory deployment. + sort_order: 1 + - name: Tesla Optimus + url: https://www.tesla.com/optimus + description: > + Leveraging Tesla FSD neural nets and Dojo for humanoid robot perception and + control. + detail: > + Track the vertical integration play: AI chips, data, and manufacturing at + scale. + sort_order: 2 + - name: Boston Dynamics Atlas + url: https://bostondynamics.com/atlas + description: > + Electric Atlas with advanced manipulation, whole-body control, and industrial + applications. + detail: > + Study the transition from hydraulic research platform to commercial electric + humanoid. + sort_order: 3 + - name: 1X Technologies + url: https://www.1x.tech + description: > + NEO humanoid designed for home environments with learned behaviors from + neural network policies. + detail: > + Watch the consumer humanoid market thesis and safety approach. + sort_order: 4 + - name: Agility Robotics Digit + url: https://agilityrobotics.com + description: > + Purpose-built for warehouse logistics with Amazon deployment partnership. + detail: > + Study the focused use-case strategy vs general-purpose humanoids. 
+ sort_order: 5 + - name: Unitree + url: https://www.unitree.com + description: > + Low-cost humanoid and quadruped robots making embodied AI accessible for + research and deployment. + detail: > + Track the cost disruption angle: $16K humanoid vs $100K+ competitors. + sort_order: 6 + + - id: service + title: Service Robotics + objective: > + Robots already at work - restaurants, hospitals, warehouses, and last-mile + delivery. + color: "#eb5757" + sort_order: 4 + resources: + - name: Bear Robotics Servi + url: https://www.bearrobotics.ai + description: > + Autonomous food service robots deployed in restaurants, hotels, and casinos. + detail: > + Study the unit economics of robot waitstaff vs human labor costs. + sort_order: 1 + - name: Savioke Relay + url: https://www.savioke.com + description: > + Autonomous delivery robots for hotels, hospitals, and high-rises. + detail: > + Track the hospitality automation ROI and guest experience data. + sort_order: 2 + - name: Nuro + url: https://www.nuro.ai + description: > + Purpose-built autonomous delivery vehicles for groceries, food, and packages. + detail: > + Study the regulatory path for driverless delivery on public roads. + sort_order: 3 + - name: Serve Robotics + url: https://www.serverobotics.com + description: > + Sidewalk delivery robots partnered with Uber Eats for last-mile autonomous + delivery. + detail: > + Track the Uber partnership and sidewalk autonomy regulatory landscape. + sort_order: 4 + - name: Diligent Robotics Moxi + url: https://www.diligentrobots.com + description: > + Hospital service robot handling supply delivery, reducing nurse walking time + by 30%. + detail: > + Study healthcare labor augmentation and clinical workflow integration. + sort_order: 5 + + - id: autonomous + title: Autonomous Systems + objective: > + Vehicles and drones that navigate the real world without a human at the wheel. 
+ color: "#5e6ad2" + sort_order: 5 + resources: + - name: Waymo + url: https://waymo.com + description: > + Fully autonomous ride-hailing in San Francisco, Phoenix, LA. Most mature + commercial self-driving. + detail: > + Study the sensor fusion stack and how they achieved commercial scale. + sort_order: 1 + - name: Tesla FSD + url: https://www.tesla.com/autopilot + description: > + Vision-only self-driving using end-to-end neural networks trained on fleet + data. + detail: > + Track the camera-only vs lidar debate and real-world safety data. + sort_order: 2 + - name: Cruise + url: https://www.getcruise.com + description: > + GM-backed autonomous vehicles with urban robotaxi operations. + detail: > + Study the regulatory setbacks and recovery strategy post-2023 incidents. + sort_order: 3 + - name: Skydio + url: https://www.skydio.com + description: > + Autonomous drones for infrastructure inspection, public safety, and defense. + detail: > + Study the enterprise drone autonomy stack and computer vision pipeline. + sort_order: 4 + - name: Zipline + url: https://www.flyzipline.com + description: > + Autonomous drone delivery for medical supplies and consumer goods at national + scale. + detail: > + Track the longest-running commercial drone delivery operation globally. + sort_order: 5 + + - id: agentic + title: Agentic Automation + objective: > + AI doing the work of knowledge workers - customer service, back-office, and + beyond. + color: "#f472b6" + sort_order: 6 + resources: + - name: Sierra AI + url: https://sierra.ai + description: > + Enterprise AI agents for customer service replacing traditional contact + centers. Founded by Bret Taylor. + detail: > + Study the conversational AI agent architecture for enterprise support. + sort_order: 1 + - name: Decagon + url: https://decagon.ai + description: > + AI customer support agents with enterprise-grade reliability and compliance. 
+ detail: > + Track how AI agents handle edge cases, escalation, and knowledge grounding. + sort_order: 2 + - name: UiPath + url: https://www.uipath.com + description: > + RPA platform evolving into AI-powered agentic automation for enterprise + workflows. + detail: > + Study the RPA-to-AI-agent evolution and enterprise adoption. + sort_order: 3 + - name: Anthropic Computer Use + url: https://docs.anthropic.com/en/docs/agents-and-tools/computer-use + description: > + Claude operating computers via screenshots and mouse/keyboard - + general-purpose digital agent. + detail: > + Study the computer-use paradigm: how LLMs interact with arbitrary software. + sort_order: 4 + - name: Cognition Devin + url: https://www.cognition.ai + description: > + Autonomous AI software engineer handling full development tasks end-to-end. + detail: > + Track the software engineering agent benchmark and real-world adoption. + sort_order: 5 + - name: Adept AI + url: https://www.adept.ai + description: > + AI agents that interact with enterprise software through natural language + commands. + detail: > + Study the action model approach: training AI to use software like humans do. + sort_order: 6 + + - id: edge_inference + title: Edge Inference + objective: > + Run models on tiny hardware at real-time speed - the last mile from training + to deployment. + color: "#4ade80" + sort_order: 7 + resources: + - name: NVIDIA TensorRT + url: https://developer.nvidia.com/tensorrt + description: > + High-performance deep learning inference optimizer and runtime for NVIDIA + GPUs - the standard for production edge deployment. + detail: > + Learn the full pipeline: PyTorch to ONNX to TensorRT engine. Master INT8 + calibration and layer fusion. + sort_order: 1 + - name: ONNX Runtime + url: https://onnxruntime.ai + description: > + Cross-platform inference engine supporting CPU, GPU, and NPU hardware. The + universal intermediate format for model deployment. 
+ detail: > + Use as the portable inference layer: export once to ONNX, deploy anywhere + (Jetson, x86, ARM, browser). + sort_order: 2 + - name: NVIDIA Jetson Orin + url: https://developer.nvidia.com/embedded/jetson-orin + description: > + Edge AI computer for robotics - up to 275 TOPS in 60W. The standard + hardware for on-device robot inference. + detail: > + Target platform for proof projects. Orin Nano ($250) for entry, Orin NX for + production robots. + sort_order: 3 + - name: Qualcomm AI Hub + url: https://aihub.qualcomm.com + description: > + Optimize and deploy models on Qualcomm Snapdragon chips - phones, XR + headsets, IoT devices, and drones. + detail: > + Study the mobile/drone edge inference stack. Relevant for autonomous drones + and wearable AI. + sort_order: 4 + - name: Apache TVM + url: https://tvm.apache.org + description: > + Open-source compiler framework that optimizes ML models for any hardware + backend (CPU, GPU, FPGA, custom accelerators). + detail: > + Deep dive into how model compilation works across hardware targets. Advanced + but fundamental knowledge. + sort_order: 5 + - name: ExecuTorch (Meta) + url: https://pytorch.org/executorch + description: > + PyTorch's on-device inference framework - deploy PyTorch models to mobile, + embedded, and edge with minimal changes. + detail: > + The PyTorch-native path to edge: no ONNX export needed. Watch for robotics + adoption alongside LeRobot. + sort_order: 6 + + - id: physical-ai + title: Physical AI + objective: > + Prep for the next wave of AI roles - world models, sim-to-real, 3D vision, + and physical AI. + color: "#ffc47c" + sort_order: 8 + resources: + - name: NVIDIA Physical AI + url: https://developer.nvidia.com/physical-ai + description: > + NVIDIA's physical AI platform: Isaac Sim, Cosmos, and the full sim-to-real + stack. + detail: > + Study the end-to-end pipeline from simulation to real-world robot deployment. 
+ sort_order: 1 + - name: World Labs + url: https://www.worldlabs.ai + description: > + Fei-Fei Li's spatial intelligence company building large world models. + detail: > + Track the spatial AI thesis and job openings in this space. + sort_order: 2 + - name: Figure AI Careers + url: https://www.figure.ai/careers + description: > + Humanoid robotics company hiring for world model, perception, and control + roles. + detail: > + Study their job descriptions to understand the skill requirements for + physical AI roles. + sort_order: 3 + - name: Physical Intelligence (pi) + url: https://www.physicalintelligence.company + description: > + General-purpose robot foundation models. Raised $400M+. + detail: > + Track the company building the GPT moment for robotics. + sort_order: 4 + - name: 1X Technologies + url: https://www.1x.tech + description: > + NEO humanoid built with learned neural network policies. + detail: > + Study the consumer humanoid thesis and their ML-first approach. + sort_order: 5 diff --git a/curriculum/tracks/freelance-strategy.yaml b/curriculum/tracks/freelance-strategy.yaml new file mode 100644 index 0000000..2656e77 --- /dev/null +++ b/curriculum/tracks/freelance-strategy.yaml @@ -0,0 +1,156 @@ +id: freelance-strategy +title: Freelance Strategy +description: > + Build a sustainable freelance practice - real-time systems, APIs at scale, AI agent infrastructure, production hardening, and positioning. +difficulty: intermediate +track_type: resource +modules: + - id: realtime_systems + title: Real-Time Systems + objective: > + Build AI products that feel instant - streaming UX, event pipelines, and low-latency infra. + color: "#55cdff" + sort_order: 1 + resources: + - name: LiveKit + url: https://livekit.io + description: Real-time transport infrastructure for audio, video, and data channels used in interactive AI products. + detail: Useful when you need low-latency sessions. Voice is one use case, not the only one.
+ sort_order: 1 + - name: Ably + url: https://ably.com/docs + description: Managed pub/sub and WebSocket infrastructure with global edge fan-out and delivery guarantees. + detail: Great for shipping real-time updates fast without running custom socket infrastructure. + sort_order: 2 + - name: NATS + url: https://docs.nats.io + description: Lightweight event bus for low-latency messaging, request/reply, and streaming via JetStream. + detail: Strong choice for control planes, internal signaling, and event-driven AI workers. + sort_order: 3 + - name: Apache Kafka + url: https://kafka.apache.org/documentation/ + description: Durable event streaming for high-throughput pipelines, replayable logs, and consumer groups. + detail: Best when you need auditable event history and independent downstream consumers. + sort_order: 4 + - name: MDN WebSocket API + url: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API + description: Core browser/server bidirectional transport for live UX, streaming updates, and control messages. + detail: Know heartbeats, reconnect strategy, auth refresh, and backpressure handling. + sort_order: 5 + - name: Cloudflare Durable Objects + url: https://developers.cloudflare.com/durable-objects/ + description: Stateful edge compute for room/session coordination and shared low-latency state. + detail: Useful for collaborative AI, session routing, and deterministic per-session state handling. + sort_order: 6 + + - id: apis_at_scale + title: APIs at Scale + objective: > + Design APIs that handle real traffic - rate limiting, caching, observability, and scaling. + color: "#ffc47c" + sort_order: 2 + resources: + - name: Kong Gateway + url: https://docs.konghq.com + description: "API gateway: rate limiting, auth, load balancing, observability plugins, and service mesh integration." + detail: Know gateway patterns cold. Most production APIs sit behind Kong, Envoy, or AWS API Gateway. 
+ sort_order: 1 + - name: gRPC Documentation + url: https://grpc.io/docs/ + description: "High-performance RPC framework: protobuf, streaming, deadlines, and service-to-service communication." + detail: gRPC is the default for internal APIs at scale. Practice protobuf schemas and bidirectional streaming. + sort_order: 2 + - name: Designing Data-Intensive Applications + url: https://dataintensive.net + description: "The bible for distributed systems: replication, partitioning, consistency, and batch/stream processing." + detail: Read chapters on replication and partitioning. Every system design interview draws from this book. + sort_order: 3 + - name: FastAPI + url: https://fastapi.tiangolo.com + description: "Modern Python API framework: async, type hints, auto-docs, WebSocket support, and dependency injection." + detail: The go-to for Python ML/AI APIs. Know how to structure a production FastAPI service with middleware. + sort_order: 4 + - name: Hono + url: https://hono.dev + description: "Ultra-fast edge-first web framework: works on Cloudflare Workers, Deno, Bun, and Node. TypeScript-native." + detail: Increasingly used for high-performance TypeScript APIs. Study the middleware and edge deployment patterns. + sort_order: 5 + + - id: ai_agent_infra + title: AI Agent Infra + objective: > + Ship AI agents that work at scale - orchestration, guardrails, cost control, and deployment. + color: "#5bb86e" + sort_order: 3 + resources: + - name: LangGraph + url: https://langchain-ai.github.io/langgraph/ + description: "Stateful agent orchestration: graph-based workflows, checkpointing, human-in-the-loop, and multi-agent coordination." + detail: The leading framework for production agent architectures. Study graph topologies and state management. + sort_order: 1 + - name: CrewAI + url: https://docs.crewai.com + description: Multi-agent orchestration with role-based agents, task delegation, and process management. + detail: Simpler than LangGraph for multi-agent setups. 
Study the role/task/process abstraction. + sort_order: 2 + - name: Anthropic Tool Use Docs + url: https://docs.anthropic.com/en/docs/build-with-claude/tool-use + description: "Claude's native tool calling: function definitions, structured outputs, and multi-turn tool use patterns." + detail: Master tool-use patterns. This is the core primitive for every AI agent. + sort_order: 3 + - name: OpenAI Assistants API + url: https://platform.openai.com/docs/assistants/overview + description: "Managed agent infrastructure: threads, runs, tools, file search, and code interpreter." + detail: Understand what managed agent APIs abstract vs what you'd build custom at a startup. + sort_order: 4 + - name: Letta (MemGPT) + url: https://docs.letta.com + description: Stateful agent framework with persistent memory, tool execution, and long-running agent deployment. + detail: Study the memory architecture - long-term memory is the hardest unsolved problem in production agents. + sort_order: 5 + - name: Temporal + url: https://temporal.io + description: "Durable execution engine: workflow orchestration, retries, timeouts, and exactly-once execution guarantees." + detail: Not AI-specific but critical for production agents. Temporal solves the reliability layer that agents need. + sort_order: 6 + + - id: production_hardening + title: Production Hardening + objective: > + Make your system survive production - observability, deployments, incidents, and chaos testing. + color: "#eb5757" + sort_order: 4 + resources: + - name: Grafana + Prometheus Stack + url: https://grafana.com/docs/grafana/latest/ + description: "Open-source observability: metrics (Prometheus), logs (Loki), traces (Tempo), and dashboards (Grafana)." + detail: The default observability stack. Know how to instrument a service and build actionable dashboards. 
+ sort_order: 1 + - name: OpenTelemetry + url: https://opentelemetry.io/docs/ + description: "Vendor-neutral telemetry standard: traces, metrics, and logs with auto-instrumentation for major frameworks." + detail: OTel is the standard. Learn to instrument Python and TypeScript services with spans and custom metrics. + sort_order: 2 + - name: Google SRE Books + url: https://sre.google/books/ + description: "Site Reliability Engineering canon: SLOs, error budgets, toil reduction, incident response, and on-call practices." + detail: Read chapters on SLOs, monitoring, and incident management. These concepts come up in every production role. + sort_order: 3 + - name: k6 Load Testing + url: https://k6.io/docs/ + description: "Modern load testing: scripted scenarios, thresholds, CI integration, and distributed testing." + detail: Know how to load test an API before launch. Practice writing k6 scripts with realistic traffic patterns. + sort_order: 4 + - name: Fly.io / Railway + url: https://fly.io/docs/ + description: "Edge deployment platforms: containers, global distribution, auto-scaling, and production Postgres." + detail: Know how to deploy and scale services globally. Practice zero-downtime deployments. + sort_order: 5 + + - id: positioning + title: Positioning + objective: > + Become the obvious hire - proof projects, outreach strategy, and what signals top 0.01%. + color: "#5e6ad2" + sort_order: 5 + resources: [] diff --git a/curriculum/tracks/frontend-engineering.yaml b/curriculum/tracks/frontend-engineering.yaml new file mode 100644 index 0000000..e5fcadd --- /dev/null +++ b/curriculum/tracks/frontend-engineering.yaml @@ -0,0 +1,357 @@ +id: frontend-engineering +title: Frontend Engineering +description: > + Deep reference for modern frontend development - state management, component patterns, + data fetching, performance, TypeScript, testing, and architecture at scale. 
+difficulty: intermediate +track_type: resource +modules: + - id: state + title: State Management + objective: > + The five kinds of state, when to reach for a library, and patterns that keep your app predictable. + color: "#55cdff" + sort_order: 1 + resources: + - name: Zustand + url: https://github.com/pmndrs/zustand + description: > + Minimal, unopinionated state management with a hooks-first API and no boilerplate. + detail: > + Start here for most apps. Understand slices, selectors, and middleware (persist, devtools). + sort_order: 1 + - name: Jotai + url: https://jotai.org + description: > + Atomic state primitives inspired by Recoil - bottom-up, composable, and TypeScript-native. + detail: > + Best for derived state and when you want fine-grained reactivity without selectors. + sort_order: 2 + - name: XState + url: https://stately.ai/docs/xstate + description: > + Finite state machines and statecharts for complex UI flows with visualizable logic. + detail: > + Use for multi-step forms, auth flows, or any UI with more than 3 states and transitions between them. + sort_order: 3 + - name: TanStack Store + url: https://tanstack.com/store + description: > + Framework-agnostic reactive store from the TanStack ecosystem. + detail: > + Useful when you need a store that works across React, Solid, Vue - or as a learning model for signals. + sort_order: 4 + - name: Valtio + url: https://github.com/pmndrs/valtio + description: > + Proxy-based state that feels like mutable JavaScript but triggers React re-renders automatically. + detail: > + Good mental model for understanding proxy-based reactivity. Compare with MobX's approach. + sort_order: 5 + - name: Legend State + url: https://legendapp.com/open-source/state/v3/ + description: > + Signal-based state with fine-grained reactivity, persistence plugins, and sync adapters. + detail: > + Study the fine-grained reactivity model - it's where React state management is heading. 
+ sort_order: 6 + + - id: components + title: Component Patterns + objective: > + Composition, compound components, headless UI, and hooks that make components reusable without over-abstraction. + color: "#ffc47c" + sort_order: 2 + resources: + - name: Radix Primitives + url: https://www.radix-ui.com/primitives + description: > + Unstyled, accessible UI primitives with compound component patterns and data-attribute styling. + detail: > + The gold standard for headless components. Study how they compose with asChild and manage focus. + sort_order: 1 + - name: React Aria (Adobe) + url: https://react-spectrum.adobe.com/react-aria/ + description: > + Hook-based accessibility primitives that separate behavior from rendering entirely. + detail: > + Read the hooks source to understand how ARIA patterns work under the hood. + sort_order: 2 + - name: Headless UI + url: https://headlessui.com + description: > + Tailwind Labs' unstyled, accessible components designed for Tailwind CSS integration. + detail: > + Compare with Radix - same problem, different API surface. Understand the tradeoffs. + sort_order: 3 + - name: Downshift + url: https://github.com/downshift-js/downshift + description: > + Primitives for building accessible combobox, select, and autocomplete components. + detail: > + A masterclass in the render prop and hooks patterns for maximum flexibility. + sort_order: 4 + - name: Shadcn/ui + url: https://ui.shadcn.com + description: > + Copy-paste component collection built on Radix + Tailwind. Own your components, no npm dependency. + detail: > + Study how it wraps Radix primitives with Tailwind styling - it's the modern component library pattern. + sort_order: 5 + - name: Ark UI + url: https://ark-ui.com + description: > + Headless component library from the Chakra team, built on state machines (Zag.js). + detail: > + Compare with Radix and React Aria - state machine-driven components are a different paradigm. 
+ sort_order: 6 + + - id: data_layer + title: Data Layer + objective: > + Fetching, caching, mutations, optimistic updates, and real-time sync - everything between your UI and your API. + color: "#5bb86e" + sort_order: 3 + resources: + - name: TanStack Query + url: https://tanstack.com/query + description: > + Server state management - caching, deduplication, background refetching, optimistic updates. + detail: > + Understand stale-while-revalidate, query keys, mutations, and infinite queries. The mental model matters more than the API. + sort_order: 1 + - name: SWR + url: https://swr.vercel.app + description: > + Lightweight data fetching with stale-while-revalidate strategy from Vercel. + detail: > + Compare with TanStack Query - simpler API, fewer features. Good for understanding the core SWR pattern. + sort_order: 2 + - name: tRPC + url: https://trpc.io + description: > + End-to-end typesafe APIs without code generation - your backend types flow to the frontend. + detail: > + Study how it eliminates the API contract layer. Pairs with TanStack Query under the hood. + sort_order: 3 + - name: Relay + url: https://relay.dev + description: > + Meta's GraphQL client with compiler-driven data fetching and fragment colocation. + detail: > + The most opinionated client. Study fragment colocation and how it solves the waterfall problem. + sort_order: 4 + - name: Apollo Client + url: https://www.apollographql.com/docs/react + description: > + Full-featured GraphQL client with normalized caching, local state, and reactive variables. + detail: > + Understand normalized cache vs document cache (TanStack Query). Know when GraphQL is worth the complexity. + sort_order: 5 + - name: Ky + url: https://github.com/sindresorhus/ky + description: > + Tiny HTTP client built on fetch with retries, hooks, JSON shortcuts, and timeout handling. + detail: > + When you don't need a caching layer - just a better fetch. Good for understanding what fetch lacks. 
+ sort_order: 6 + + - id: performance + title: Performance + objective: > + React's rendering model, memoization traps, virtualization, code splitting, and making 60fps feel effortless. + color: "#eb5757" + sort_order: 4 + resources: + - name: React Compiler + url: https://react.dev/learn/react-compiler + description: > + Automatic memoization at build time - the compiler decides what to memo so you don't have to. + detail: > + Understand what it auto-memoizes and what it can't. This is where manual useMemo/useCallback is heading. + sort_order: 1 + - name: Million.js + url: https://million.dev + description: > + Block virtual DOM that makes React components up to 70% faster by compiling away diffing. + detail: > + Study the block virtual DOM concept - it's a different approach to React performance than memoization. + sort_order: 2 + - name: TanStack Virtual + url: https://tanstack.com/virtual + description: > + Headless virtualization for lists, tables, and grids - render only what's visible. + detail: > + Essential for any list over 100 items. Understand how it measures and positions elements. + sort_order: 3 + - name: Partytown + url: https://partytown.builder.io + description: > + Move third-party scripts to web workers to keep the main thread free. + detail: > + Solves the analytics/tracking script performance problem. Understand the proxy architecture. + sort_order: 4 + - name: web.dev Performance + url: https://web.dev/learn/performance + description: > + Google's comprehensive guide to Core Web Vitals, loading, rendering, and runtime performance. + detail: > + The canonical reference for LCP, FID, CLS, INP. Know what each metric measures and how to fix it. + sort_order: 5 + - name: React Scan + url: https://github.com/aidenybai/react-scan + description: > + Visualize React component renders in real-time - see exactly what re-renders and why. + detail: > + Use alongside React DevTools Profiler. Faster feedback loop for spotting unnecessary renders. 
+ sort_order: 6 + + - id: typescript + title: Advanced TypeScript + objective: > + Generics, inference, branded types, discriminated unions, and type-level programming that catches bugs at compile time. + color: "#5e6ad2" + sort_order: 5 + resources: + - name: Type Challenges + url: https://github.com/type-challenges/type-challenges + description: > + Collection of TypeScript type-level puzzles from easy to extreme - learn generics by doing. + detail: > + Work through Easy and Medium challenges. The type-level thinking transfers directly to library authoring. + sort_order: 1 + - name: Total TypeScript + url: https://www.totaltypescript.com + description: > + Matt Pocock's TypeScript courses covering generics, type transformations, and advanced patterns. + detail: > + The best resource for going from 'I use TypeScript' to 'I think in TypeScript'. Free tutorials are excellent. + sort_order: 2 + - name: ts-pattern + url: https://github.com/gvergnaud/ts-pattern + description: > + Exhaustive pattern matching for TypeScript with type narrowing and discriminated union support. + detail: > + Replaces switch statements with something safer. Study how it narrows types through matching. + sort_order: 3 + - name: Zod + url: https://zod.dev + description: > + Schema validation with automatic TypeScript type inference - define once, validate and type everywhere. + detail: > + The standard for runtime validation with static types. Understand z.infer and schema composition. + sort_order: 4 + - name: Effect + url: https://effect.website + description: > + TypeScript framework for typed errors, dependency injection, concurrency, and composable programs. + detail: > + Advanced - study the typed error channel and how it makes impossible states unrepresentable. + sort_order: 5 + - name: TypeScript Handbook + url: https://www.typescriptlang.org/docs/handbook/ + description: > + Official handbook covering the type system from basics to advanced generics and conditional types. 
+ detail: > + Re-read the generics and conditional types sections after you have some experience. They click differently. + sort_order: 6 + + - id: testing + title: Testing + objective: > + Component tests, integration tests, E2E, mocking strategies, and accessibility testing that actually prevents regressions. + color: "#f472b6" + sort_order: 6 + resources: + - name: Testing Library + url: https://testing-library.com + description: > + Test components the way users use them - queries by role, text, and label instead of implementation details. + detail: > + The guiding principle: 'The more your tests resemble the way your software is used, the more confidence they give you.' + sort_order: 1 + - name: Vitest + url: https://vitest.dev + description: > + Vite-native test runner with Jest-compatible API, ESM support, and blazing fast watch mode. + detail: > + Understand the jsdom vs happy-dom environments, vi.mock hoisting, and inline snapshots. + sort_order: 2 + - name: Playwright + url: https://playwright.dev + description: > + Cross-browser E2E testing with auto-waiting, network interception, and visual regression support. + detail: > + Master locators (getByRole, getByText), auto-waiting, and the trace viewer for debugging failures. + sort_order: 3 + - name: MSW (Mock Service Worker) + url: https://mswjs.io + description: > + API mocking at the network level - intercepts requests in both browser and Node.js environments. + detail: > + The right way to mock APIs in tests. Understand request handlers and how they compose with Testing Library. + sort_order: 4 + - name: Axe-core + url: https://github.com/dequelabs/axe-core + description: > + Automated accessibility testing engine that catches WCAG violations in your components. + detail: > + Integrate with vitest-axe or @axe-core/playwright. Catches ~30% of a11y issues automatically. 
+ sort_order: 5 + - name: Storybook + url: https://storybook.js.org + description: > + Component development environment with visual testing, documentation, and interaction testing. + detail: > + Use for component isolation and visual regression. The interaction testing addon replaces many integration tests. + sort_order: 6 + + - id: architecture + title: Architecture + objective: > + File structure, module boundaries, monorepo patterns, and decisions that keep large codebases navigable. + color: "#4ade80" + sort_order: 7 + resources: + - name: Bulletproof React + url: https://github.com/alan2207/bulletproof-react + description: > + Opinionated React architecture guide - project structure, patterns, and best practices at scale. + detail: > + The reference architecture for feature-based folder structure. Study the boundaries between features. + sort_order: 1 + - name: Nx + url: https://nx.dev + description: > + Monorepo build system with dependency graph, affected commands, and module boundary enforcement. + detail: > + Understand project boundaries, the dependency graph, and how module federation enables micro-frontends. + sort_order: 2 + - name: Turborepo + url: https://turbo.build/repo + description: > + Incremental build system for monorepos - caches everything, runs only what changed. + detail: > + Compare with Nx. Turborepo is simpler (just builds/tasks), Nx is richer (lint rules, generators). + sort_order: 3 + - name: Module Federation + url: https://module-federation.io + description: > + Runtime module sharing between independently deployed applications - the micro-frontend primitive. + detail: > + Understand shared dependencies, version negotiation, and when micro-frontends are worth the complexity. + sort_order: 4 + - name: Feature-Sliced Design + url: https://feature-sliced.design + description: > + Architectural methodology for frontend projects - layers, slices, and segments with explicit import rules. + detail: > + An opinionated layer system. 
Compare with Bulletproof React's feature-based approach. + sort_order: 5 + - name: Patterns.dev + url: https://www.patterns.dev + description: > + Comprehensive catalog of design patterns, rendering patterns, and performance patterns for modern web apps. + detail: > + Read the rendering patterns section (SSR, SSG, ISR, streaming) and the React patterns section. + sort_order: 6 diff --git a/curriculum/tracks/gpu-for-ai.yaml b/curriculum/tracks/gpu-for-ai.yaml new file mode 100644 index 0000000..b547ae6 --- /dev/null +++ b/curriculum/tracks/gpu-for-ai.yaml @@ -0,0 +1,512 @@ +id: gpu-for-ai +title: GPU for AI +description: > + Master GPU hardware and software for AI workloads - architecture, CUDA programming, + distributed training, memory optimization, cloud strategy, profiling, networking, + and alternatives to NVIDIA. +difficulty: intermediate +track_type: resource +modules: + - id: architecture + title: GPU Architecture + objective: > + Understand the hardware under your models - Tensor Cores, memory hierarchy, + and what specs actually matter. + color: "#55cdff" + sort_order: 1 + resources: + - name: NVIDIA H100 Whitepaper + url: https://resources.nvidia.com/en-us-tensor-core + description: > + Hopper architecture deep dive: 4th-gen Tensor Cores, Transformer Engine, + FP8 support, HBM3 at 3.35TB/s. + detail: > + The canonical reference for modern AI GPU architecture. Study the memory + hierarchy diagram and Tensor Core operation modes. + sort_order: 1 + - name: NVIDIA Blackwell (B200/GB200) + url: https://www.nvidia.com/en-us/data-center/technologies/blackwell-architecture/ + description: > + 5th-gen Tensor Cores, 192GB HBM3e at 8TB/s, NVLink 5.0, and the two-die + design doubling transistor count. + detail: > + Track the generational leap: 2x memory bandwidth, 4x FP4 throughput. + Understand why Blackwell changes training economics. 
+ sort_order: 2 + - name: GPU Architecture Fundamentals (Lei Mao) + url: https://leimao.github.io/tags/CUDA/ + description: > + Clear technical blog covering SM architecture, warp scheduling, memory + coalescing, and occupancy optimization. + detail: > + Best free resource for understanding GPU internals at the hardware level. + Read the CUDA memory model posts first. + sort_order: 3 + - name: NVIDIA GPU Computing History + url: https://developer.nvidia.com/blog/tag/architecture/ + description: > + Evolution from Kepler to Blackwell: how each generation added capabilities + for AI (Tensor Cores, TF32, FP8). + detail: > + Understand the trajectory: what changed between generations and why each + improvement matters for AI workloads. + sort_order: 4 + - name: Understanding HBM (High Bandwidth Memory) + url: https://www.micron.com/products/high-bandwidth-memory + description: > + How HBM stacks work: 3D-stacked DRAM dies connected via TSVs, delivering + 10x bandwidth over GDDR6. + detail: > + Memory bandwidth is the #1 bottleneck for LLM inference. Understand why + HBM3e matters more than peak FLOPS. + sort_order: 5 + - name: Tensor Core Deep Dive + url: https://developer.nvidia.com/blog/programming-tensor-cores-cuda-9/ + description: > + How Tensor Cores execute matrix multiply-accumulate operations and why they + dominate AI throughput. + detail: > + Study the warp-level matrix operations (WMMA). Understand FP16/BF16/TF32/FP8 + precision modes and when to use each. + sort_order: 6 + + - id: cuda + title: CUDA Programming + objective: > + Write GPU code from scratch - kernels, shared memory, and custom CUDA for AI + workloads. + color: "#ffc47c" + sort_order: 2 + resources: + - name: NVIDIA CUDA Toolkit + url: https://developer.nvidia.com/cuda-toolkit + description: > + The foundational programming model for GPU compute - parallel kernels, + memory management, and the entire NVIDIA software ecosystem. 
+ detail: > + Understand the CUDA programming model (grids, blocks, threads) even if you + never write raw CUDA - it explains why GPU libraries work the way they do. + sort_order: 1 + - name: CUDA by Example (Book) + url: https://developer.nvidia.com/cuda-example + description: > + Hands-on introduction to CUDA programming with practical examples covering + parallel patterns, memory, and synchronization. + detail: > + The best starting point for CUDA. Work through the examples - understanding + threads, blocks, and shared memory is foundational. + sort_order: 2 + - name: Triton (OpenAI) + url: https://github.com/triton-lang/triton + description: > + Python-like language for writing GPU kernels without raw CUDA. Powers PyTorch + custom ops and Flash Attention. + detail: > + The practical path to custom GPU kernels for AI engineers. Higher productivity + than CUDA with 80-90% of the performance. + sort_order: 3 + - name: CUTLASS (NVIDIA) + url: https://github.com/NVIDIA/cutlass + description: > + Template library for high-performance matrix multiplication on Tensor Cores. + The building block behind cuBLAS and TensorRT. + detail: > + Advanced - study this after understanding Tensor Cores. Shows how production + GEMM kernels achieve peak hardware utilization. + sort_order: 4 + - name: ThunderKittens (Stanford) + url: https://github.com/HazyResearch/ThunderKittens + description: > + Embedded DSL for writing GPU kernels in terms of hardware-level operations + on warp tiles. Simplifies Tensor Core programming. + detail: > + Emerging alternative to Triton for kernel development. Study the warp-tile + abstraction for understanding hardware-mapped programming. + sort_order: 5 + - name: GPU Mode Community + url: https://github.com/gpu-mode + description: > + Community lectures and resources on GPU programming, kernel optimization, + and CUDA internals. + detail: > + Watch the lecture series for practical GPU programming knowledge. 
Good + complement to official NVIDIA documentation. + sort_order: 6 + + - id: distributed + title: Distributed Training + objective: > + Scale training across many GPUs - parallelism strategies, DeepSpeed, and + multi-node setups. + color: "#5bb86e" + sort_order: 3 + resources: + - name: DeepSpeed (Microsoft) + url: https://github.com/microsoft/DeepSpeed + description: > + Multi-GPU and multi-node training with ZeRO optimizer stages, pipeline + parallelism, and mixed precision. + detail: > + The go-to library for training models that don't fit on one GPU. Study ZeRO + stages 1-3 to understand memory vs communication tradeoffs. + sort_order: 1 + - name: PyTorch FSDP + url: https://pytorch.org/docs/stable/fsdp.html + description: > + Fully Sharded Data Parallel - PyTorch-native distributed training sharding + model, optimizer, and gradient states across GPUs. + detail: > + The PyTorch-native alternative to DeepSpeed. Compare FSDP vs ZeRO for your + model size and cluster topology. + sort_order: 2 + - name: Megatron-LM (NVIDIA) + url: https://github.com/NVIDIA/Megatron-LM + description: > + Large-scale transformer training with 3D parallelism (data + tensor + + pipeline) optimized for NVIDIA hardware. + detail: > + The reference implementation for training 100B+ parameter models. Study the + 3D parallelism strategy and sequence parallelism. + sort_order: 3 + - name: PyTorch Distributed Overview + url: https://pytorch.org/tutorials/beginner/dist_overview.html + description: > + Official guide covering DDP, FSDP, RPC, and pipeline parallelism APIs in + PyTorch. + detail: > + Start here for the fundamentals. Understand DDP first (simplest), then + graduate to FSDP and tensor parallelism. + sort_order: 4 + - name: Hugging Face Accelerate + url: https://github.com/huggingface/accelerate + description: > + Wrapper library making multi-GPU and multi-node training accessible with + minimal code changes. + detail: > + Best developer experience for distributed training. 
Abstracts DeepSpeed/FSDP + behind a simple API. + sort_order: 5 + - name: "ZeRO & DeepSpeed Explained (Microsoft Research)" + url: https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/ + description: > + Visual explanation of ZeRO stages: how partitioning optimizer states, + gradients, and parameters reduces memory per GPU. + detail: > + Essential reading. The ZeRO paper is the most important distributed training + concept - understand stages 1, 2, and 3. + sort_order: 6 + + - id: memory + title: Memory Optimization + objective: > + Fit larger models and train faster - Flash Attention, mixed precision, and + memory tricks. + color: "#eb5757" + sort_order: 4 + resources: + - name: Flash Attention + url: https://github.com/Dao-AILab/flash-attention + description: > + IO-aware attention algorithm reducing HBM reads/writes from O(N^2) to sub-quadratic. + 2-4x speedup, now standard everywhere. + detail: > + Understand the core insight: attention is memory-bound, not compute-bound. + Tiling and recomputation trade compute for memory access. + sort_order: 1 + - name: Gradient Checkpointing + url: https://pytorch.org/docs/stable/checkpoint.html + description: > + Trade compute for memory by recomputing activations during backward pass + instead of storing them. + detail: > + Enables training 2-4x larger models on the same GPU. Understand the + compute/memory tradeoff and where to place checkpoints. + sort_order: 2 + - name: Mixed Precision Training (NVIDIA) + url: https://developer.nvidia.com/blog/mixed-precision-training-deep-neural-networks/ + description: > + FP16/BF16 training with FP32 master weights and loss scaling. 2x memory + reduction and faster Tensor Core utilization. + detail: > + BF16 is the default for modern training. Understand why loss scaling is + needed for FP16 but not BF16. 
+ sort_order: 3 + - name: bitsandbytes + url: https://github.com/bitsandbytes-foundation/bitsandbytes + description: > + 8-bit and 4-bit optimizers and quantization for training and inference. + Enables QLoRA fine-tuning on consumer GPUs. + detail: > + The key enabler for running large models on small GPUs. Study 4-bit + NormalFloat (NF4) quantization and paged optimizers. + sort_order: 4 + - name: Activation Recomputation Strategies + url: https://pytorch.org/docs/stable/distributed.algorithms.ddp_comm_hooks.html + description: > + Selective recomputation of activations during backward pass - balancing + memory savings against compute overhead. + detail: > + Beyond basic checkpointing: learn selective strategies that recompute only + the memory-heavy layers (attention, large FFN). + sort_order: 5 + - name: Memory-Efficient Attention Variants + url: https://github.com/facebookresearch/xformers + description: > + xFormers library with memory-efficient attention, fused operations, and + composable transformer building blocks. + detail: > + Practical toolkit for memory optimization. Study the fused kernels: combining + operations reduces memory traffic. + sort_order: 6 + + - id: cloud + title: Cloud GPU Strategy + objective: > + Spend less on GPUs - spot pricing, provider comparison, and when on-prem beats + cloud. + color: "#5e6ad2" + sort_order: 5 + resources: + - name: Lambda Labs GPU Cloud + url: https://lambdalabs.com/service/gpu-cloud + description: > + On-demand H100 and A100 instances at competitive pricing. Simple API, no + reserved commitment required. + detail: > + Good for burst training. Compare Lambda on-demand pricing vs hyperscaler + reserved instances for your workload pattern. + sort_order: 1 + - name: CoreWeave + url: https://www.coreweave.com + description: > + GPU-native cloud built for AI workloads with Kubernetes-based orchestration, + InfiniBand networking, and H100 clusters. + detail: > + Study the purpose-built GPU cloud architecture. 
Better networking than + hyperscalers for distributed training. + sort_order: 2 + - name: RunPod + url: https://www.runpod.io + description: > + Serverless GPU platform with spot pricing, persistent storage, and one-click + template deployments. + detail: > + Best for inference and short training runs. Spot instances at 60-70% discount + for fault-tolerant workloads. + sort_order: 3 + - name: vast.ai + url: https://vast.ai + description: > + GPU marketplace connecting renters with idle hardware. Lowest prices but + variable reliability. + detail: > + Study the GPU rental economics. Useful for experimentation but not production + training. + sort_order: 4 + - name: NVIDIA DGX Cloud + url: https://www.nvidia.com/en-us/data-center/dgx-cloud/ + description: > + NVIDIA-managed multi-node GPU clusters with full-stack software optimization. + The premium tier for large-scale training. + detail: > + Understand when managed infrastructure justifies the premium: guaranteed + availability, optimized networking, NVIDIA support. + sort_order: 5 + - name: GPU Cost Calculator (Epoch AI) + url: https://epochai.org/blog/training-compute-of-frontier-ai-models + description: > + Analysis of training compute costs for frontier models. Understand the + economics driving GPU demand. + detail: > + Put GPU costs in context: what does it actually cost to train GPT-4-class + models? How does that scale? + sort_order: 6 + + - id: profiling + title: GPU Profiling + objective: > + Find and fix GPU bottlenecks - profiling, compute vs memory bound, and + systematic optimization. + color: "#f472b6" + sort_order: 6 + resources: + - name: NVIDIA Nsight Systems + url: https://developer.nvidia.com/nsight-systems + description: > + System-wide performance analysis showing CPU-GPU interaction, kernel launches, + memory transfers, and pipeline bubbles. + detail: > + The first profiling tool to reach for. Gives the big picture: where is time + spent, where are the gaps? 
+ sort_order: 1 + - name: NVIDIA Nsight Compute + url: https://developer.nvidia.com/nsight-compute + description: > + Kernel-level profiler showing warp occupancy, memory throughput, instruction + mix, and roofline analysis. + detail: > + Use after Nsight Systems identifies slow kernels. The roofline model tells + you if a kernel is compute-bound or memory-bound. + sort_order: 2 + - name: PyTorch Profiler + url: https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html + description: > + Built-in profiling for PyTorch operations with TensorBoard integration, GPU + kernel tracing, and memory tracking. + detail: > + The easiest way to profile Python-level training code. Use + torch.profiler.profile() to trace GPU operations. + sort_order: 3 + - name: torch.cuda.memory_stats() + url: https://pytorch.org/docs/stable/generated/torch.cuda.memory_stats.html + description: > + Real-time GPU memory tracking: allocated, reserved, peak usage, and + fragmentation metrics. + detail: > + Essential for debugging OOM errors. Call before and after operations to find + memory spikes. + sort_order: 4 + - name: GPU Roofline Model + url: https://developer.nvidia.com/blog/roofline-model-gpu/ + description: > + Framework for understanding whether a workload is compute-bound or + memory-bandwidth-bound on specific hardware. + detail: > + The most important mental model for GPU optimization. Plot your kernel on the + roofline to know which optimization to apply. + sort_order: 5 + - name: Holistic Trace Analysis (Meta) + url: https://github.com/facebookresearch/HolisticTraceAnalysis + description: > + Automated analysis of distributed training traces: idle time, communication + overhead, and load imbalance detection. + detail: > + For distributed training profiling. Identifies scaling bottlenecks across + multi-GPU and multi-node setups. 
+ sort_order: 6 + + - id: networking + title: Cluster Networking + objective: > + The networking that makes or breaks multi-GPU scaling - NVLink, InfiniBand, + and topology design. + color: "#4ade80" + sort_order: 7 + resources: + - name: NVIDIA NVLink & NVSwitch + url: https://www.nvidia.com/en-us/data-center/nvlink/ + description: > + GPU-to-GPU interconnect at 900GB/s (NVLink 4) and 1.8TB/s (NVLink 5). + NVSwitch enables all-to-all GPU communication. + detail: > + NVLink bandwidth determines tensor parallelism efficiency. Understand why + intra-node (NVLink) vs inter-node (InfiniBand) matters. + sort_order: 1 + - name: InfiniBand (NVIDIA Networking) + url: https://www.nvidia.com/en-us/networking/products/infiniband/ + description: > + 400Gb/s inter-node networking with RDMA for zero-copy data transfers. The + standard for GPU cluster interconnects. + detail: > + Study RDMA and GPUDirect: how data moves between GPUs on different nodes + without touching the CPU. + sort_order: 2 + - name: NCCL (NVIDIA Collective Communications) + url: https://github.com/NVIDIA/nccl + description: > + Optimized all-reduce, all-gather, and broadcast primitives for multi-GPU + communication. Used by PyTorch DDP and FSDP. + detail: > + Understand the collective operations: all-reduce for gradient sync, + all-gather for FSDP forward pass, reduce-scatter for backward. + sort_order: 3 + - name: GPUDirect RDMA + url: https://developer.nvidia.com/gpudirect + description: > + Direct GPU-to-GPU memory access across nodes, bypassing CPU and system + memory. Eliminates copy overhead. + detail: > + The performance secret of well-configured GPU clusters. Without GPUDirect, + multi-node scaling hits a wall. + sort_order: 4 + - name: Ultra Ethernet Consortium + url: https://ultraethernet.org + description: > + Open standard for AI-optimized Ethernet networking. Alternative to + InfiniBand with broader vendor support. + detail: > + Track the InfiniBand vs Ethernet competition. 
Ultra Ethernet aims to match + IB performance at lower cost. + sort_order: 5 + - name: Cluster Topology Design + url: https://docs.nvidia.com/dgx-superpod/reference-architecture/latest/index.html + description: > + NVIDIA DGX SuperPOD reference architecture: how to wire up thousands of GPUs + with optimal bandwidth and fault tolerance. + detail: > + Study fat-tree vs rail-optimized topologies. Understand why cluster + networking is as important as the GPUs themselves. + sort_order: 6 + + - id: alternatives + title: Beyond NVIDIA + objective: > + Know when to look beyond NVIDIA - AMD, Google TPUs, Groq, and where + alternatives win. + color: "#55cdff" + sort_order: 8 + resources: + - name: AMD MI300X & ROCm + url: https://www.amd.com/en/products/accelerators/instinct/mi300.html + description: > + 192GB HBM3 at 5.3TB/s, 2.4x memory capacity vs the 80GB H100. ROCm software stack + with native PyTorch support. + detail: > + The most credible NVIDIA alternative. Study where MI300X matches H100 + (memory-bound inference) vs where it trails (Tensor Core throughput). + sort_order: 1 + - name: Google TPU v5p / Trillium + url: https://cloud.google.com/tpu + description: > + Custom AI accelerator with high-bandwidth ICI interconnect. Powers Gemini + training and is available via GCP. + detail: > + Study the TPU architecture: systolic array design, ICI networking, and + JAX/XLA programming model. Different paradigm from CUDA. + sort_order: 2 + - name: Intel Gaudi 3 + url: https://www.intel.com/content/www/us/en/products/details/processors/ai-accelerators/gaudi.html + description: > + Intel's AI accelerator targeting training and inference with integrated RoCE + networking and competitive pricing. + detail: > + Track Intel's play as a third GPU vendor. Gaudi has some AWS adoption but + ecosystem maturity lags NVIDIA. + sort_order: 3 + - name: Groq LPU + url: https://groq.com + description: > + Deterministic inference chip with no HBM - uses SRAM for ultra-low latency. 
+ Specialized for token generation. + detail: > + Study the SRAM-only architecture: why it achieves extreme token/sec but can't + train models. Inference-only hardware. + sort_order: 4 + - name: Cerebras WSE-3 + url: https://www.cerebras.net + description: > + Wafer-scale engine: single chip with 4 trillion transistors. Eliminates + multi-GPU communication overhead for training. + detail: > + Study the radical approach: one giant chip vs many small GPUs. Understand the + memory and cooling challenges at wafer scale. + sort_order: 5 + - name: AWS Trainium / Inferentia + url: https://aws.amazon.com/machine-learning/trainium/ + description: > + Amazon's custom AI chips for training (Trainium) and inference (Inferentia). + Tight integration with SageMaker. + detail: > + Study the hyperscaler custom silicon trend. Lower cost per FLOP but narrower + software support than NVIDIA. + sort_order: 6 diff --git a/curriculum/tracks/interview-prep.yaml b/curriculum/tracks/interview-prep.yaml new file mode 100644 index 0000000..dbe2ffd --- /dev/null +++ b/curriculum/tracks/interview-prep.yaml @@ -0,0 +1,138 @@ +id: interview-prep +title: Interview Prep +description: > + Prepare for technical interviews - system design, coding, and behavioral. +difficulty: intermediate +track_type: resource +modules: + - id: faang + title: FAANG Prep + objective: > + Sharpen your coding skills with drills, mock interviews, and a weekly prep rhythm. + color: "#55cdff" + sort_order: 1 + resources: + - name: NeetCode + url: https://neetcode.io + description: Structured DSA roadmaps and curated coding problems. + detail: Use for daily algorithm reps and pattern memorization. + sort_order: 1 + - name: AlgoMonster + url: https://algo.monster + description: Pattern-first interview prep with implementation templates. + detail: Use when you want to learn fast classification of problems. 
+ sort_order: 2 + - name: AlgoExpert.io + url: https://www.algoexpert.io + description: Video-first explanations with medium/hard interview sets. + detail: Use to sharpen coding explanation speed before live rounds. + sort_order: 3 + - name: Interviewing.io + url: https://interviewing.io + description: Anonymous mock interviews and transcript-style feedback. + detail: Use weekly for pressure testing and communication practice. + sort_order: 4 + - name: Codemia + url: https://codemia.io + description: Community problems and collaborative coding interview practice. + detail: Use to diversify problem exposure and peer review loops. + sort_order: 5 + + - id: system_design + title: System Design Prep + objective: > + Practice designing systems under pressure - case studies, drills, and mock interviews. + color: "#5bb86e" + sort_order: 2 + resources: + - name: Designing Data-Intensive Applications + url: https://dataintensive.net + description: Foundational distributed-systems and data-system principles. + detail: Use as the base theory layer for tradeoff reasoning. + sort_order: 1 + - name: System Design Interview (Alex Xu) + url: https://www.amazon.com/System-Design-Interview-insiders-Second/dp/B08CMF2CQF + description: Interview-focused architecture templates and common system cases. + detail: Use for concise frameworks and quick recap sheets. + sort_order: 2 + - name: HelloInterview + url: https://www.hellointerview.com + description: Role-tailored system design prompts and structured answer guides. + detail: Use to practice problem framing and clarifying questions. + sort_order: 3 + - name: Exponent + url: https://www.tryexponent.com + description: System design lessons and mock-interview walkthroughs. + detail: Use for visual case breakdowns and timed answer practice. + sort_order: 4 + - name: DesignGurus + url: https://www.designgurus.io + description: Catalog of system design interview scenarios and drills. 
+ detail: Use to expand scenario coverage beyond common cases. + sort_order: 5 + - name: Codemia + url: https://codemia.io + description: Community-driven architecture prompts and iterative feedback. + detail: Use to practice whiteboard flow and architecture narration. + sort_order: 6 + + - id: take_home + title: Take-Homes + objective: > + Practice real take-home assignments from actual interview processes and learn the patterns. + color: "#4ade80" + sort_order: 3 + resources: + # Take-home tab uses seeded assignment data, not PrepResource[]. + # The 8 seeded assignments are localStorage-backed interactive content, + # not static resource links. Listing them here for reference. + - name: "Stripe - Build a Rate Limiter Service" + url: "" + description: "Design and implement a rate-limiting service with multiple strategies (fixed window, sliding window, token bucket)." + detail: "4-6 hours, any backend language. Medium difficulty." + sort_order: 1 + - name: "Datadog - Log Aggregation Pipeline" + url: "" + description: "Build a simplified log ingestion pipeline that handles 1000+ events/second with time range and severity queries." + detail: "8 hours, Go or Python. Hard difficulty." + sort_order: 2 + - name: "Plaid - Transaction Categorizer" + url: "" + description: "Build a web app for viewing bank transactions and assigning categories with a rules engine." + detail: "4 hours, React + Node/Python. Medium difficulty." + sort_order: 3 + - name: "Notion - Kanban Board" + url: "" + description: "Build a Kanban board with drag-and-drop, card CRUD, and persistence. Focus on UX polish and accessibility." + detail: "3-4 hours, React or Vue. Medium difficulty." + sort_order: 4 + - name: "Scale AI - Text Classification Pipeline" + url: "" + description: "Build an end-to-end text classification pipeline with training, evaluation, and a prediction API." + detail: "6-8 hours, Python + HuggingFace or scikit-learn. Hard difficulty." 
+ sort_order: 5 + - name: "Figma - Collaborative Cursor Presence" + url: "" + description: "Build a multi-user real-time cursor presence app using WebSockets on a shared canvas." + detail: "4-6 hours, TypeScript + WebSocket. Hard difficulty." + sort_order: 6 + - name: "Generic - URL Shortener" + url: "" + description: "Design and implement a URL shortener with redirect, click analytics, and scaling notes." + detail: "2-3 hours, any language. Easy difficulty." + sort_order: 7 + - name: "Airbnb - ETL Pipeline for Event Data" + url: "" + description: "Build an ETL pipeline with deduplication, schema validation, enrichment, and dead-letter queue logic." + detail: "6 hours, Python + SQL. Medium difficulty." + sort_order: 8 + + - id: interview_feedback + title: Interview Feedback + objective: > + Track what went wrong in past interviews and extract lessons for next time. + color: "#55cdff" + sort_order: 4 + resources: [] + # Interview feedback tab is a localStorage-backed CRUD interface (company, role, + # date, stage, outcome, feedback, lessons). No static resource links. diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 92beabb..cd49927 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -4,13 +4,10 @@ import { Activity, BarChart3, Blocks, - BookOpen, Brain, BriefcaseBusiness, CalendarDays, CheckSquare, - ChevronDown, - ChevronRight, Cog, Cpu, Database, @@ -754,69 +751,6 @@ function get_databases_view(tab: string | undefined): ViewId { return (found?.view as ViewId) ?? 
"databases_overview"; } -const CULTURE_GENERALE_SCIENCES_ITEMS = [ - { - label: "Physique", - track: "sciences", - tab: "physics", - view: "culture_generale_sciences_physics", - }, - { - label: "Theorie de l'Information", - track: "sciences", - tab: "information_theory", - view: "culture_generale_sciences_information_theory", - }, - { - label: "Bio & Neurosciences", - track: "sciences", - tab: "biology_neuro", - view: "culture_generale_sciences_biology_neuro", - }, -] as const; - -const CULTURE_GENERALE_HUMANITES_ITEMS = [ - { - label: "Philo des Sciences", - track: "humanites", - tab: "philo_science", - view: "culture_generale_humanites_philo_science", - }, - { - label: "Philosophie Francaise", - track: "humanites", - tab: "french_philo", - view: "culture_generale_humanites_french_philo", - }, - { - label: "Litterature & Culture", - track: "humanites", - tab: "literature", - view: "culture_generale_humanites_literature", - }, -] as const; - -const CULTURE_GENERALE_SCIENCES_SOCIALES_ITEMS = [ - { - label: "Economie & Jeux", - track: "sciences_sociales", - tab: "economics", - view: "culture_generale_sciences_sociales_economics", - }, - { - label: "Philo Politique", - track: "sciences_sociales", - tab: "political_philo", - view: "culture_generale_sciences_sociales_political_philo", - }, - { - label: "Histoire & Civilisation", - track: "sciences_sociales", - tab: "history", - view: "culture_generale_sciences_sociales_history", - }, -] as const; - function read_expandable_nav_state(): Record { try { const saved = window.localStorage.getItem(SUB_NAV_EXPANDED_KEY); @@ -2181,7 +2115,6 @@ export function Sidebar({ const current_conversation_id = active_view === "chat" ? (params.conversationId ?? 
null) : null; const sidebar_scroll_ref = useRef(null); - const [reference_show_all, set_reference_show_all] = useState(false); const [recent_chats_expanded, set_recent_chats_expanded] = useState(() => { try { const saved = window.localStorage.getItem(RECENT_CHATS_EXPANDED_KEY); @@ -2464,14 +2397,14 @@ export function Sidebar({ onClick={() => navigate({ to: "/learn" })} /> navigate({ to: "/learn/lenses" })} - /> - navigate({ to: "/learn/levels" })} + label="Dev Reference" + active={active_view.startsWith("reference_dev_ref")} + onClick={() => + navigate({ + to: "/reference", + search: { section: "dev-ref" }, + }) + } /> navigate({ to: "/culture-generale" })} /> - navigate({ to: "/cognitive-toolkit" })} - /> - navigate({ to: "/behavioral-design" })} - />
@@ -2896,1237 +2819,6 @@ export function Sidebar({ /> -
- - {/* Reference (collapsed by default) */} -
-
- } - label="Reference" - active={active_view.startsWith("reference_")} - onClick={() => navigate({ to: "/reference" })} - /> -
- {sidebarExpanded && ( - toggle_expandable_nav("reference")} - className="p-1 text-[var(--sb-sidebar-text-muted)] hover:text-[var(--sb-text-primary)] transition-colors" - label={ - expandable_nav.reference - ? "Minimize reference tabs" - : "Expand reference tabs" - } - tooltipSide="right" - > - {expandable_nav.reference ? ( - - ) : ( - - )} - - )} -
- -

- Interview Prep -

- {PREP_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { section: "prep", tab: item.tab }, - }) - } - /> - ))} - - - {DEV_REF_LANGUAGE_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { section: "dev-ref", tab: item.tab }, - }) - } - /> - ))} - - - - {DEV_REF_STACK_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { section: "dev-ref", tab: item.tab }, - }) - } - /> - ))} - -

- Databases -

- {DATABASES_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { section: "databases", tab: item.tab }, - }) - } - /> - ))} -

- Applied Systems -

- - navigate({ - to: "/reference", - search: { section: "applied-systems", tab: "llmops" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "applied-systems", tab: "recsys" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "applied-systems", tab: "dataops" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "applied-systems", tab: "evals" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "applied-systems", tab: "worldmodels" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "applied-systems", tab: "3d_vision" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "applied-systems", - tab: "distributed_ml", - }, - }) - } - /> - - {reference_show_all && ( - <> -

- Maths — Zero to One -

- - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "zero_to_one", - tab: "methode", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "zero_to_one", - tab: "diagnostic", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "zero_to_one", - tab: "college", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "zero_to_one", - tab: "lycee", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "zero_to_one", - tab: "terminale", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "zero_to_one", - tab: "evaluation", - }, - }) - } - /> -

- Maths — Prepa ML -

- - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "methode", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "linear_algebra", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "analysis", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "probability", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "applied_ml", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "geometry_3d", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "dynamics_physics", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "math-refresh", - track: "prepa_ml", - tab: "evaluation", - }, - }) - } - /> -

- Elite Freelance -

- - navigate({ - to: "/reference", - search: { - section: "elite-freelance", - tab: "realtime_systems", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "elite-freelance", - tab: "apis_at_scale", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "elite-freelance", - tab: "ai_agent_infra", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "elite-freelance", - tab: "production_hardening", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "elite-freelance", - tab: "positioning", - }, - }) - } - /> -

- AI Engineering -

- - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "inference" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "agents" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "evals" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "retrieval" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "memory" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "fine_tuning" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "multimodal" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "ai-engineering", tab: "reasoning" }, - }) - } - /> -

- Frontend Engineering -

- - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "state" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "components" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "data_layer" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "performance" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "typescript" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "testing" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "frontend-eng", tab: "architecture" }, - }) - } - /> -

- GPUs for AI -

- navigate({ to: "/how-gpu-works", search: {} })} - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "architecture" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "cuda" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "distributed" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "memory" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "cloud" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "profiling" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "networking" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "gpu-for-ai", tab: "alternatives" }, - }) - } - /> -

- Embodied AI -

- - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "world_models" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "core" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "humanoid" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "service" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "autonomous" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "agentic" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "embodied-ai", tab: "edge_inference" }, - }) - } - /> -

- Bio-Augmentation -

- - navigate({ - to: "/reference", - search: { - section: "bio-augmentation", - tab: "foundations", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "bio-augmentation", tab: "neurotech" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "bio-augmentation", tab: "wearables" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "bio-augmentation", - tab: "biohacking", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "bio-augmentation", - tab: "translation", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "bio-augmentation", - tab: "convergence", - }, - }) - } - /> -

- Chinese Learning -

- - navigate({ - to: "/reference", - search: { section: "chinese", tab: "dashboard" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "chinese", tab: "vocab" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "chinese", tab: "lessons" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "chinese", tab: "review" }, - }) - } - /> -

- Cantonese Learning -

- - navigate({ - to: "/reference", - search: { section: "cantonese", tab: "dashboard" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "cantonese", tab: "vocab" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "cantonese", tab: "lessons" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "cantonese", tab: "review" }, - }) - } - /> -

- Culture Generale — Sciences -

- {CULTURE_GENERALE_SCIENCES_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { - section: "culture-generale", - track: item.track, - tab: item.tab, - }, - }) - } - /> - ))} -

- Culture Generale — Humanites -

- {CULTURE_GENERALE_HUMANITES_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { - section: "culture-generale", - track: item.track, - tab: item.tab, - }, - }) - } - /> - ))} -

- Culture Generale — Sciences Sociales -

- {CULTURE_GENERALE_SCIENCES_SOCIALES_ITEMS.map((item) => ( - - navigate({ - to: "/reference", - search: { - section: "culture-generale", - track: item.track, - tab: item.tab, - }, - }) - } - /> - ))} -

- Cognitive Toolkit -

- - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "foundation", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "operating_system", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "techniques", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "worldview", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "cognitive-toolkit", tab: "library" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "cognitive-toolkit", tab: "playbook" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "operators", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "social_dynamics", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "cognitive-toolkit", - tab: "ai_leverage", - }, - }) - } - /> -

- Behavioral Design -

- - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "frameworks", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "feed_design", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "social_loops", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "variable_rewards", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { section: "behavioral-design", tab: "friction" }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "notifications", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "gamification", - }, - }) - } - /> - - navigate({ - to: "/reference", - search: { - section: "behavioral-design", - tab: "case_studies", - }, - }) - } - /> - - )} -
{/* Personal */}
diff --git a/frontend/src/lib/api/endpoints.ts b/frontend/src/lib/api/endpoints.ts index 9096337..bb7225f 100644 --- a/frontend/src/lib/api/endpoints.ts +++ b/frontend/src/lib/api/endpoints.ts @@ -1887,4 +1887,23 @@ export const api = { `/curriculum/tracks/${encodeURIComponent(trackId)}/modules/${encodeURIComponent(moduleId)}/progress`, ); }, + + getCurriculumModuleResources(trackId: string, moduleId: string) { + return apiRequest<{ + track_id: string; + module_id: string; + resources: import("./types").TrackResource[]; + total: number; + }>( + `/curriculum/tracks/${encodeURIComponent(trackId)}/modules/${encodeURIComponent(moduleId)}/resources`, + ); + }, + + getCurriculumTrackResources(trackId: string) { + return apiRequest<{ + track_id: string; + resources: import("./types").TrackResource[]; + total: number; + }>(`/curriculum/tracks/${encodeURIComponent(trackId)}/resources`); + }, }; diff --git a/frontend/src/lib/api/types.ts b/frontend/src/lib/api/types.ts index 62a873b..7013e30 100644 --- a/frontend/src/lib/api/types.ts +++ b/frontend/src/lib/api/types.ts @@ -1536,14 +1536,27 @@ export interface TeachingLensSummary { // --- Curriculum --- +export interface TrackResource { + id: number; + name: string; + url: string | null; + description: string; + detail: string | null; + resource_type: "link" | "reference" | "knowledge"; + sort_order: number; + metadata_json: string | null; +} + export interface CurriculumTrackSummary { id: string; title: string; description: string; difficulty: string; + track_type: "concept" | "resource"; is_published: boolean; module_count: number; concept_count: number; + resource_count: number; } export interface CurriculumTrackListResponse { @@ -1557,7 +1570,9 @@ export interface CurriculumModuleSummary { objective: string; estimated_time_minutes: number; sort_order: number; + color: string | null; concept_count: number; + resource_count: number; } export interface CurriculumTrackDetail { @@ -1565,6 +1580,7 @@ export interface 
CurriculumTrackDetail { title: string; description: string; difficulty: string; + track_type: "concept" | "resource"; is_published: boolean; modules: CurriculumModuleSummary[]; } @@ -1584,7 +1600,9 @@ export interface CurriculumModuleDetail { objective: string; estimated_time_minutes: number; sort_order: number; + color: string | null; concepts: CurriculumConceptRef[]; + resources: TrackResource[]; } export interface CurriculumModuleProgress { diff --git a/frontend/src/lib/query/keys.ts b/frontend/src/lib/query/keys.ts index 2064511..bb95718 100644 --- a/frontend/src/lib/query/keys.ts +++ b/frontend/src/lib/query/keys.ts @@ -183,4 +183,6 @@ export const queryKeys = { ["curriculum-module", trackId, moduleId] as const, curriculumModuleProgress: (trackId: string, moduleId: string) => ["curriculum-module-progress", trackId, moduleId] as const, + curriculumTrackResources: (trackId: string) => + ["curriculum-track-resources", trackId] as const, }; diff --git a/frontend/src/routes/curriculum.tsx b/frontend/src/routes/curriculum.tsx index 7d21f9a..ae1f014 100644 --- a/frontend/src/routes/curriculum.tsx +++ b/frontend/src/routes/curriculum.tsx @@ -6,15 +6,15 @@ const CurriculumTracksView = lazy(() => })), ); -const CurriculumTrackDetailView = lazy(() => - import("../views/CurriculumTrackDetailView").then((m) => ({ - default: m.CurriculumTrackDetailView, +const TrackDetailView = lazy(() => + import("../views/TrackDetailView").then((m) => ({ + default: m.TrackDetailView, })), ); -const CurriculumModuleDetailView = lazy(() => - import("../views/CurriculumModuleDetailView").then((m) => ({ - default: m.CurriculumModuleDetailView, +const ModuleDetailView = lazy(() => + import("../views/TrackDetailView").then((m) => ({ + default: m.ModuleDetailView, })), ); @@ -37,7 +37,7 @@ export function CurriculumTracksRoute() { export function CurriculumTrackDetailRoute() { return ( }> - + ); } @@ -45,7 +45,7 @@ export function CurriculumTrackDetailRoute() { export function 
CurriculumModuleDetailRoute() { return ( }> - + ); } diff --git a/frontend/src/views/CurriculumTracksView.tsx b/frontend/src/views/CurriculumTracksView.tsx index e7ad3ce..248fd40 100644 --- a/frontend/src/views/CurriculumTracksView.tsx +++ b/frontend/src/views/CurriculumTracksView.tsx @@ -1,6 +1,12 @@ import { useNavigate } from "@tanstack/react-router"; import { motion } from "motion/react"; -import { BookOpen, Clock, Layers, GraduationCap } from "lucide-react"; +import { + BookOpen, + Clock, + Layers, + GraduationCap, + Link as LinkIcon, +} from "lucide-react"; import { useDocumentTitle } from "../hooks/useDocumentTitle"; import { PremiumHero, PremiumPage } from "../components/layout/PremiumShell"; import { api } from "../lib/api/endpoints"; @@ -33,7 +39,7 @@ export function CurriculumTracksView() { @@ -73,6 +79,10 @@ function TrackCard({ index: number; onClick: () => void; }) { + const isResource = track.track_type === "resource"; + const itemCount = isResource ? track.resource_count : track.concept_count; + const itemLabel = isResource ? "resource" : "concept"; + return ( - - {track.concept_count} concept - {track.concept_count !== 1 ? "s" : ""} + {isResource ? ( + + ) : ( + + )} + {itemCount} {itemLabel} + {itemCount !== 1 ? 
"s" : ""} diff --git a/frontend/src/views/DevRefView.tsx b/frontend/src/views/DevRefView.tsx index f309c13..5274f7f 100644 --- a/frontend/src/views/DevRefView.tsx +++ b/frontend/src/views/DevRefView.tsx @@ -1,4 +1,4 @@ -import { useSearch } from "@tanstack/react-router"; +import { useSearch, useNavigate } from "@tanstack/react-router"; import { CodeBlock } from "../components/CodeBlock"; import { useDocumentTitle } from "../hooks/useDocumentTitle"; @@ -8683,8 +8683,125 @@ const TAB_CONTENT: Record< // Main view // --------------------------------------------------------------------------- +const TAB_CATEGORIES: { label: string; tabs: DevRefTab[] }[] = [ + { + label: "Languages & Shell", + tabs: [ + "python", + "sql", + "rust", + "cpp", + "typescript", + "go", + "linux", + "bash", + "tmux", + "swift", + "nodejs", + ], + }, + { + label: "Frontend", + tabs: [ + "react", + "tanstack_router", + "vite", + "tailwind", + "radix_ui", + "framer_motion", + "d3", + "vitest", + "playwright", + ], + }, + { + label: "Backend & APIs", + tabs: [ + "fastapi", + "httpx", + "graphql", + "grpc", + "rest_api_design", + "pydantic", + "oauth", + "sse", + "nginx", + "pytest", + ], + }, + { + label: "AI & Retrieval", + tabs: [ + "anthropic", + "langchain", + "openai", + "chromadb", + "cohere", + "embeddings", + "langfuse", + "arize", + ], + }, + { + label: "Databases", + tabs: ["postgresql", "redis", "sqlite", "mongodb", "duckdb", "clickhouse"], + }, + { label: "Data Processing", tabs: ["polars", "pandas", "spark", "kafka"] }, + { + label: "Data Platform", + tabs: [ + "databricks", + "snowflake", + "bigquery", + "dbt", + "airflow", + "astronomer", + "dagster", + "airbyte", + "iceberg", + "data_governance", + ], + }, + { + label: "ML & Deep Learning", + tabs: [ + "pytorch", + "scikit_learn", + "huggingface", + "cuda", + "model_serving", + "ray", + "mlflow", + "feast", + "sagemaker", + "vertex_ai", + ], + }, + { + label: "Infra & DevOps", + tabs: [ + "git", + "docker", + "kubernetes", + 
"helm", + "terraform", + "github_actions", + "flyio", + "aws", + "gcp", + "cloudflare_workers", + "temporal", + ], + }, + { + label: "Observability & Cost", + tabs: ["opentelemetry", "prometheus", "grafana", "sentry", "finops"], + }, +]; + export function DevRefView() { const search = useSearch({ strict: false }) as { tab?: string }; + const navigate = useNavigate(); const active_tab: DevRefTab = TAB_ORDER.includes(search.tab as DevRefTab) ? (search.tab as DevRefTab) : "python"; @@ -8704,6 +8821,46 @@ export function DevRefView() { description={active_tab_meta.description} /> + {/* Category-grouped tab navigator */} +
+ {TAB_CATEGORIES.map((cat) => ( +
+ + {cat.label} + + {cat.tabs.map((tab) => { + const isActive = tab === active_tab; + const meta = TAB_META[tab]; + return ( + + ); + })} +
+ ))} +
+
+ {/* Tab navigator */} +
+ {TAB_ORDER.map((tab) => { + const isActive = tab === active_tab; + return ( + + ); + })} +
+
= { + beginner: "Beginner", + intermediate: "Intermediate", + advanced: "Advanced", +}; + +const READINESS_CONFIG: Record< + string, + { label: string; color: string; bg: string } +> = { + rich: { + label: "Rich", + color: "#4ade80", + bg: "rgba(74, 222, 128, 0.08)", + }, + grounded: { + label: "Grounded", + color: "#55cdff", + bg: "rgba(85, 205, 255, 0.08)", + }, + scaffolded: { + label: "Scaffolded", + color: "#ffc47c", + bg: "rgba(255, 196, 124, 0.08)", + }, +}; + +export function TrackDetailView() { + const { trackId } = useParams({ strict: false }) as { trackId: string }; + const navigate = useNavigate(); + + const { data: track, isLoading } = useQuery( + queryKeys.curriculumTrack(trackId), + () => api.getCurriculumTrack(trackId), + { staleTimeMs: 30_000 }, + ); + + useDocumentTitle(track?.title ?? "Track"); + + if (isLoading || !track) { + return ( + + +
+
+
+ + ); + } + + if (track.track_type === "resource") { + return ; + } + + return ( + + + + +
+ + + {track.modules.length} module + {track.modules.length !== 1 ? "s" : ""} + +
+ +
+ {track.modules.map((mod, i) => ( + + navigate({ + to: "/learn/tracks/$trackId/modules/$moduleId", + params: { trackId, moduleId: mod.id }, + }) + } + /> + ))} +
+
+ ); +} + +function ResourceTrackView({ trackId }: { trackId: string }) { + const search = useSearch({ strict: false }) as { tab?: string }; + const navigate = useNavigate(); + + const { data: track } = useQuery( + queryKeys.curriculumTrack(trackId), + () => api.getCurriculumTrack(trackId), + { staleTimeMs: 30_000 }, + ); + + const activeId = track + ? (track.modules.find((m) => m.id === search.tab)?.id ?? + track.modules[0]?.id ?? + "") + : ""; + + const { data: mod } = useQuery( + queryKeys.curriculumModule(trackId, activeId), + () => api.getCurriculumModule(trackId, activeId), + { staleTimeMs: 30_000, enabled: !!activeId }, + ); + + if (!track) return null; + + const activeMod = track.modules.find((m) => m.id === activeId); + const tabColor = activeMod?.color ?? "#55cdff"; + + return ( + + + + + {/* Tab bar */} +
+ {track.modules.map((m) => { + const isActive = m.id === activeId; + return ( + + ); + })} +
+ + {/* Module stats */} +
+ + + {activeMod?.resource_count ?? 0} resource + {(activeMod?.resource_count ?? 0) !== 1 ? "s" : ""} + +
+ + {/* Resources grid */} +
+ {mod?.resources.map((resource, i) => ( + + ))} +
+ + {mod?.resources.length === 0 && ( +
+ No resources in this module yet. +
+ )} +
+ ); +} + +function BackButton() { + const navigate = useNavigate(); + return ( + + ); +} + +function ConceptModuleRow({ + module: mod, + index, + onClick, +}: { + module: CurriculumModuleSummary; + index: number; + onClick: () => void; +}) { + return ( + +
+ {index + 1} +
+
+
+ {mod.title} +
+

+ {mod.objective} +

+
+
+ + + {mod.concept_count} + + + + {mod.estimated_time_minutes}m + + +
+
+ ); +} + +export function ModuleDetailView() { + const { trackId, moduleId } = useParams({ strict: false }) as { + trackId: string; + moduleId: string; + }; + const navigate = useNavigate(); + + const { data: mod, isLoading } = useQuery( + queryKeys.curriculumModule(trackId, moduleId), + () => api.getCurriculumModule(trackId, moduleId), + { staleTimeMs: 30_000 }, + ); + + const { data: progress } = useQuery( + queryKeys.curriculumModuleProgress(trackId, moduleId), + () => api.getCurriculumModuleProgress(trackId, moduleId), + { staleTimeMs: 30_000 }, + ); + + useDocumentTitle(mod?.title ?? "Module"); + + if (isLoading || !mod) { + return ( + + +
+
+
+ + ); + } + + const accentColor = mod.color ?? "#55cdff"; + const hasResources = mod.resources.length > 0; + const hasConcepts = mod.concepts.length > 0; + + return ( + + + + + +
+ {hasConcepts && ( + + + {mod.concepts.length} concept + {mod.concepts.length !== 1 ? "s" : ""} + + )} + {hasResources && ( + + + {mod.resources.length} resource + {mod.resources.length !== 1 ? "s" : ""} + + )} + + + {mod.estimated_time_minutes} min + + {progress && progress.total > 0 && ( + + {progress.completed} of {progress.total} done + + )} +
+ + {hasConcepts && ( +
+ {mod.concepts.map((concept, i) => ( + + navigate({ + to: "/learn/$conceptId", + params: { conceptId: concept.concept_id }, + }) + } + /> + ))} +
+ )} + + {hasResources && ( +
+ {mod.resources.map((resource, i) => ( + + ))} +
+ )} +
+ ); +} + +function ConceptRow({ + concept, + index, + onClick, +}: { + concept: CurriculumConceptRef; + index: number; + onClick: () => void; +}) { + const readiness = + READINESS_CONFIG[concept.readiness_state] ?? READINESS_CONFIG.scaffolded; + + return ( + +
+ {concept.sort_order} +
+
+
+ {concept.title} +
+
+
+ + {readiness.label} + + +
+
+ ); +} From 62169148e18bd2c61cd785c2170be84a4ed995e2 Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Tue, 17 Mar 2026 17:09:21 +0100 Subject: [PATCH 05/10] fix(math): remove math product surfaces from Samaritan Delete the entire math feature family: /math, /math/refresh, /math/bridge and legacy redirects /math-refresh, /math-bridge. Removes 22 math-only files (views, routes, components, hooks, data), cleans sidebar nav, route tree, ViewId entries, workflow inventory, command palette, reference sections, and landing page references. MathRenderer.tsx preserved (moved to components/) - still used by culture-generale and ConceptDetailView. --- .../{math-exercise => }/MathRenderer.tsx | 0 frontend/src/components/Sidebar.tsx | 1024 +++---- .../landing/sections/DomainsSection.tsx | 3 - .../landing/sections/WhyAdrienSection.tsx | 5 - .../components/math-exercise/ExerciseCard.tsx | 274 -- .../math-exercise/ExerciseSession.tsx | 239 -- .../components/math-exercise/MathInput.tsx | 166 -- .../components/math-exercise/PracticeTab.tsx | 217 -- .../components/math-exercise/answer-check.ts | 140 - .../components/math-exercise/exercise-bank.ts | 1499 ---------- .../math-exercise/exercise-types.ts | 108 - frontend/src/hooks/useMathProgress.ts | 133 - frontend/src/lib/workflowInventory.ts | 18 +- frontend/src/router.tsx | 168 -- frontend/src/routes/math-bridge.tsx | 23 - frontend/src/routes/math-refresh.tsx | 23 - frontend/src/routes/math.tsx | 23 - frontend/src/views/CareerFoundationsView.tsx | 2 +- frontend/src/views/CompletionView.tsx | 14 +- frontend/src/views/ConceptDetailView.tsx | 2 +- frontend/src/views/MathBridgeLevelContent.tsx | 374 --- frontend/src/views/MathBridgeMicroCheck.tsx | 193 -- frontend/src/views/MathBridgeOverview.tsx | 227 -- .../src/views/MathBridgeReadinessBanner.tsx | 129 - frontend/src/views/MathBridgeView.tsx | 83 - frontend/src/views/MathLandingView.tsx | 148 - frontend/src/views/MathRefreshView.tsx | 2437 
----------------- frontend/src/views/MightyGodModeView.tsx | 1 - frontend/src/views/ReferenceView.tsx | 4 - .../views/culture-generale/LessonPanel.tsx | 2 +- .../src/views/culture-generale/QuizEngine.tsx | 2 +- frontend/src/views/math-bridge-checks.ts | 220 -- frontend/src/views/math-bridge-data.ts | 890 ------ frontend/src/views/math-bridge-progress.ts | 245 -- 34 files changed, 446 insertions(+), 8590 deletions(-) rename frontend/src/components/{math-exercise => }/MathRenderer.tsx (100%) delete mode 100644 frontend/src/components/math-exercise/ExerciseCard.tsx delete mode 100644 frontend/src/components/math-exercise/ExerciseSession.tsx delete mode 100644 frontend/src/components/math-exercise/MathInput.tsx delete mode 100644 frontend/src/components/math-exercise/PracticeTab.tsx delete mode 100644 frontend/src/components/math-exercise/answer-check.ts delete mode 100644 frontend/src/components/math-exercise/exercise-bank.ts delete mode 100644 frontend/src/components/math-exercise/exercise-types.ts delete mode 100644 frontend/src/hooks/useMathProgress.ts delete mode 100644 frontend/src/routes/math-bridge.tsx delete mode 100644 frontend/src/routes/math-refresh.tsx delete mode 100644 frontend/src/routes/math.tsx delete mode 100644 frontend/src/views/MathBridgeLevelContent.tsx delete mode 100644 frontend/src/views/MathBridgeMicroCheck.tsx delete mode 100644 frontend/src/views/MathBridgeOverview.tsx delete mode 100644 frontend/src/views/MathBridgeReadinessBanner.tsx delete mode 100644 frontend/src/views/MathBridgeView.tsx delete mode 100644 frontend/src/views/MathLandingView.tsx delete mode 100644 frontend/src/views/MathRefreshView.tsx delete mode 100644 frontend/src/views/math-bridge-checks.ts delete mode 100644 frontend/src/views/math-bridge-data.ts delete mode 100644 frontend/src/views/math-bridge-progress.ts diff --git a/frontend/src/components/math-exercise/MathRenderer.tsx b/frontend/src/components/MathRenderer.tsx similarity index 100% rename from 
frontend/src/components/math-exercise/MathRenderer.tsx rename to frontend/src/components/MathRenderer.tsx diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index cd49927..2d4b2f6 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -64,13 +64,6 @@ export type ViewId = | "product_lab" | "career" | "career_opportunities" - | "math_landing" - | "math_refresh_active" - | "math_bridge" - | "math_bridge_core_numeracy" - | "math_bridge_high_school" - | "math_bridge_pre_university" - | "math_bridge_engineering_prep" | "career_foundations" | "career_foundations_msc" | "career_foundations_assessment" @@ -305,20 +298,6 @@ export type ViewId = | "applied_systems_worldmodels" | "applied_systems_3d_vision" | "applied_systems_distributed_ml" - | "math_refresh_z2o_methode" - | "math_refresh_z2o_diagnostic" - | "math_refresh_z2o_college" - | "math_refresh_z2o_lycee" - | "math_refresh_z2o_terminale" - | "math_refresh_z2o_evaluation" - | "math_refresh_pml_methode" - | "math_refresh_pml_linear_algebra" - | "math_refresh_pml_analysis" - | "math_refresh_pml_probability" - | "math_refresh_pml_applied_ml" - | "math_refresh_pml_geometry_3d" - | "math_refresh_pml_dynamics_physics" - | "math_refresh_pml_evaluation" | "culture_generale_sciences_physics" | "culture_generale_sciences_information_theory" | "culture_generale_sciences_biology_neuro" @@ -448,7 +427,6 @@ type ExpandableNavKey = | "culture_generale" | "cognitive_toolkit" | "behavioral_design" - | "math_refresh" | "chinese" | "cantonese" | "elite_freelance" @@ -466,7 +444,6 @@ type ExpandableNavKey = | "reference" | "harness_dev_tools" | "mighty_god_mode" - | "math_bridge" | "career_foundations" | "ingest" | "learning" @@ -500,7 +477,6 @@ const DEFAULT_EXPANDABLE_NAV_STATE: Record = { culture_generale: true, cognitive_toolkit: true, behavioral_design: true, - math_refresh: true, chinese: true, cantonese: true, elite_freelance: true, @@ -518,7 +494,6 @@ const 
DEFAULT_EXPANDABLE_NAV_STATE: Record = { reference: false, harness_dev_tools: true, mighty_god_mode: true, - math_bridge: true, career_foundations: true, ingest: false, learning: false, @@ -859,10 +834,6 @@ function read_expandable_nav_state(): Record { typeof parsed.behavioral_design === "boolean" ? parsed.behavioral_design : DEFAULT_EXPANDABLE_NAV_STATE.behavioral_design, - math_refresh: - typeof parsed.math_refresh === "boolean" - ? parsed.math_refresh - : DEFAULT_EXPANDABLE_NAV_STATE.math_refresh, chinese: typeof parsed.chinese === "boolean" ? parsed.chinese @@ -931,10 +902,6 @@ function read_expandable_nav_state(): Record { typeof parsed.mighty_god_mode === "boolean" ? parsed.mighty_god_mode : DEFAULT_EXPANDABLE_NAV_STATE.mighty_god_mode, - math_bridge: - typeof parsed.math_bridge === "boolean" - ? parsed.math_bridge - : DEFAULT_EXPANDABLE_NAV_STATE.math_bridge, career_foundations: typeof parsed.career_foundations === "boolean" ? parsed.career_foundations @@ -1135,189 +1102,161 @@ export function Sidebar({ : url_tab === "distributed_ml" ? "applied_systems_distributed_ml" : "applied_systems_llmops" - : url_section === "math-refresh" - ? url_track === "prepa_ml" - ? url_tab === "linear_algebra" - ? "math_refresh_pml_linear_algebra" - : url_tab === "analysis" - ? "math_refresh_pml_analysis" - : url_tab === "probability" - ? "math_refresh_pml_probability" - : url_tab === "applied_ml" - ? "math_refresh_pml_applied_ml" - : url_tab === "geometry_3d" - ? "math_refresh_pml_geometry_3d" - : url_tab === "dynamics_physics" - ? "math_refresh_pml_dynamics_physics" - : url_tab === "evaluation" - ? "math_refresh_pml_evaluation" - : "math_refresh_pml_methode" - : url_tab === "diagnostic" - ? "math_refresh_z2o_diagnostic" - : url_tab === "college" - ? "math_refresh_z2o_college" - : url_tab === "lycee" - ? "math_refresh_z2o_lycee" - : url_tab === "terminale" - ? "math_refresh_z2o_terminale" - : url_tab === "evaluation" - ? 
"math_refresh_z2o_evaluation" - : "math_refresh_z2o_methode" - : url_section === "elite-freelance" - ? url_tab === "realtime_systems" - ? "elite_freelance_realtime_systems" - : url_tab === "apis_at_scale" - ? "elite_freelance_apis_at_scale" - : url_tab === "ai_agent_infra" - ? "elite_freelance_ai_agent_infra" - : url_tab === "production_hardening" - ? "elite_freelance_production_hardening" - : url_tab === "positioning" - ? "elite_freelance_positioning" - : "elite_freelance_realtime_systems" - : url_section === "ai-engineering" - ? url_tab === "agents" - ? "ai_engineering_agents" - : url_tab === "evals" - ? "ai_engineering_evals" - : url_tab === "retrieval" - ? "ai_engineering_retrieval" + : url_section === "elite-freelance" + ? url_tab === "realtime_systems" + ? "elite_freelance_realtime_systems" + : url_tab === "apis_at_scale" + ? "elite_freelance_apis_at_scale" + : url_tab === "ai_agent_infra" + ? "elite_freelance_ai_agent_infra" + : url_tab === "production_hardening" + ? "elite_freelance_production_hardening" + : url_tab === "positioning" + ? "elite_freelance_positioning" + : "elite_freelance_realtime_systems" + : url_section === "ai-engineering" + ? url_tab === "agents" + ? "ai_engineering_agents" + : url_tab === "evals" + ? "ai_engineering_evals" + : url_tab === "retrieval" + ? "ai_engineering_retrieval" + : url_tab === "memory" + ? "ai_engineering_memory" + : url_tab === "fine_tuning" + ? "ai_engineering_fine_tuning" + : url_tab === "multimodal" + ? "ai_engineering_multimodal" + : url_tab === "reasoning" + ? "ai_engineering_reasoning" + : "ai_engineering_inference" + : url_section === "frontend-eng" + ? url_tab === "components" + ? "frontend_eng_components" + : url_tab === "data_layer" + ? "frontend_eng_data_layer" + : url_tab === "performance" + ? "frontend_eng_performance" + : url_tab === "typescript" + ? "frontend_eng_typescript" + : url_tab === "testing" + ? "frontend_eng_testing" + : url_tab === "architecture" + ? 
"frontend_eng_architecture" + : "frontend_eng_state" + : url_section === "gpu-for-ai" + ? url_tab === "cuda" + ? "gpu_for_ai_cuda" + : url_tab === "distributed" + ? "gpu_for_ai_distributed" : url_tab === "memory" - ? "ai_engineering_memory" - : url_tab === "fine_tuning" - ? "ai_engineering_fine_tuning" - : url_tab === "multimodal" - ? "ai_engineering_multimodal" - : url_tab === "reasoning" - ? "ai_engineering_reasoning" - : "ai_engineering_inference" - : url_section === "frontend-eng" - ? url_tab === "components" - ? "frontend_eng_components" - : url_tab === "data_layer" - ? "frontend_eng_data_layer" - : url_tab === "performance" - ? "frontend_eng_performance" - : url_tab === "typescript" - ? "frontend_eng_typescript" - : url_tab === "testing" - ? "frontend_eng_testing" - : url_tab === "architecture" - ? "frontend_eng_architecture" - : "frontend_eng_state" - : url_section === "gpu-for-ai" - ? url_tab === "cuda" - ? "gpu_for_ai_cuda" - : url_tab === "distributed" - ? "gpu_for_ai_distributed" - : url_tab === "memory" - ? "gpu_for_ai_memory" - : url_tab === "cloud" - ? "gpu_for_ai_cloud" - : url_tab === "profiling" - ? "gpu_for_ai_profiling" - : url_tab === "networking" - ? "gpu_for_ai_networking" - : url_tab === "alternatives" - ? "gpu_for_ai_alternatives" - : "gpu_for_ai_architecture" - : url_section === "embodied-ai" - ? url_tab === "humanoid" - ? "embodied_ai_humanoid" - : url_tab === "service" - ? "embodied_ai_service" - : url_tab === "autonomous" - ? "embodied_ai_autonomous" - : url_tab === "agentic" - ? "embodied_ai_agentic" - : url_tab === "edge_inference" - ? "embodied_ai_edge_inference" - : url_tab === "world_models" - ? "embodied_ai_world_models" - : "embodied_ai_core" - : url_section === "bio-augmentation" - ? url_tab === "neurotech" - ? "bio_augmentation_neurotech" - : url_tab === "wearables" - ? "bio_augmentation_wearables" - : url_tab === "biohacking" - ? "bio_augmentation_biohacking" - : url_tab === "translation" - ? 
"bio_augmentation_translation" - : url_tab === "convergence" - ? "bio_augmentation_convergence" - : "bio_augmentation_foundations" - : url_section === "chinese" + ? "gpu_for_ai_memory" + : url_tab === "cloud" + ? "gpu_for_ai_cloud" + : url_tab === "profiling" + ? "gpu_for_ai_profiling" + : url_tab === "networking" + ? "gpu_for_ai_networking" + : url_tab === "alternatives" + ? "gpu_for_ai_alternatives" + : "gpu_for_ai_architecture" + : url_section === "embodied-ai" + ? url_tab === "humanoid" + ? "embodied_ai_humanoid" + : url_tab === "service" + ? "embodied_ai_service" + : url_tab === "autonomous" + ? "embodied_ai_autonomous" + : url_tab === "agentic" + ? "embodied_ai_agentic" + : url_tab === "edge_inference" + ? "embodied_ai_edge_inference" + : url_tab === "world_models" + ? "embodied_ai_world_models" + : "embodied_ai_core" + : url_section === "bio-augmentation" + ? url_tab === "neurotech" + ? "bio_augmentation_neurotech" + : url_tab === "wearables" + ? "bio_augmentation_wearables" + : url_tab === "biohacking" + ? "bio_augmentation_biohacking" + : url_tab === "translation" + ? "bio_augmentation_translation" + : url_tab === "convergence" + ? "bio_augmentation_convergence" + : "bio_augmentation_foundations" + : url_section === "chinese" + ? url_tab === "vocab" + ? "chinese_vocab" + : url_tab === "lessons" + ? "chinese_lessons" + : url_tab === "review" + ? "chinese_review" + : "chinese_dashboard" + : url_section === "cantonese" ? url_tab === "vocab" - ? "chinese_vocab" + ? "cantonese_vocab" : url_tab === "lessons" - ? "chinese_lessons" + ? "cantonese_lessons" : url_tab === "review" - ? "chinese_review" - : "chinese_dashboard" - : url_section === "cantonese" - ? url_tab === "vocab" - ? "cantonese_vocab" - : url_tab === "lessons" - ? "cantonese_lessons" - : url_tab === "review" - ? "cantonese_review" - : "cantonese_dashboard" - : url_section === "culture-generale" - ? url_track === "humanites" - ? url_tab === "french_philo" - ? 
"culture_generale_humanites_french_philo" - : url_tab === "literature" - ? "culture_generale_humanites_literature" - : "culture_generale_humanites_philo_science" - : url_track === "sciences_sociales" - ? url_tab === "political_philo" - ? "culture_generale_sciences_sociales_political_philo" - : url_tab === "history" - ? "culture_generale_sciences_sociales_history" - : "culture_generale_sciences_sociales_economics" - : url_tab === "information_theory" - ? "culture_generale_sciences_information_theory" - : url_tab === "biology_neuro" - ? "culture_generale_sciences_biology_neuro" - : "culture_generale_sciences_physics" - : url_section === "cognitive-toolkit" - ? url_tab === "operating_system" - ? "cognitive_toolkit_operating_system" - : url_tab === "techniques" - ? "cognitive_toolkit_techniques" - : url_tab === "worldview" - ? "cognitive_toolkit_worldview" - : url_tab === "library" - ? "cognitive_toolkit_library" - : url_tab === "playbook" - ? "cognitive_toolkit_playbook" - : url_tab === "operators" - ? "cognitive_toolkit_operators" - : url_tab === "social_dynamics" - ? "cognitive_toolkit_social_dynamics" - : url_tab === "ai_leverage" - ? "cognitive_toolkit_ai_leverage" - : "cognitive_toolkit_foundation" - : url_section === "behavioral-design" - ? url_tab === "feed_design" - ? "behavioral_design_feed_design" - : url_tab === "social_loops" - ? "behavioral_design_social_loops" - : url_tab === "variable_rewards" - ? "behavioral_design_variable_rewards" - : url_tab === "friction" - ? "behavioral_design_friction" - : url_tab === "notifications" - ? "behavioral_design_notifications" - : url_tab === "gamification" - ? "behavioral_design_gamification" - : url_tab === "case_studies" - ? "behavioral_design_case_studies" - : "behavioral_design_frameworks" - : url_section === "databases" - ? get_databases_view(url_tab) - : get_prep_view(url_tab) + ? "cantonese_review" + : "cantonese_dashboard" + : url_section === "culture-generale" + ? url_track === "humanites" + ? 
url_tab === "french_philo" + ? "culture_generale_humanites_french_philo" + : url_tab === "literature" + ? "culture_generale_humanites_literature" + : "culture_generale_humanites_philo_science" + : url_track === "sciences_sociales" + ? url_tab === "political_philo" + ? "culture_generale_sciences_sociales_political_philo" + : url_tab === "history" + ? "culture_generale_sciences_sociales_history" + : "culture_generale_sciences_sociales_economics" + : url_tab === "information_theory" + ? "culture_generale_sciences_information_theory" + : url_tab === "biology_neuro" + ? "culture_generale_sciences_biology_neuro" + : "culture_generale_sciences_physics" + : url_section === "cognitive-toolkit" + ? url_tab === "operating_system" + ? "cognitive_toolkit_operating_system" + : url_tab === "techniques" + ? "cognitive_toolkit_techniques" + : url_tab === "worldview" + ? "cognitive_toolkit_worldview" + : url_tab === "library" + ? "cognitive_toolkit_library" + : url_tab === "playbook" + ? "cognitive_toolkit_playbook" + : url_tab === "operators" + ? "cognitive_toolkit_operators" + : url_tab === "social_dynamics" + ? "cognitive_toolkit_social_dynamics" + : url_tab === "ai_leverage" + ? "cognitive_toolkit_ai_leverage" + : "cognitive_toolkit_foundation" + : url_section === "behavioral-design" + ? url_tab === "feed_design" + ? "behavioral_design_feed_design" + : url_tab === "social_loops" + ? "behavioral_design_social_loops" + : url_tab === "variable_rewards" + ? "behavioral_design_variable_rewards" + : url_tab === "friction" + ? "behavioral_design_friction" + : url_tab === "notifications" + ? "behavioral_design_notifications" + : url_tab === "gamification" + ? "behavioral_design_gamification" + : url_tab === "case_studies" + ? "behavioral_design_case_studies" + : "behavioral_design_frameworks" + : url_section === "databases" + ? get_databases_view(url_tab) + : get_prep_view(url_tab) : pathname.startsWith("/harness-dev-tools") ? url_section === "mcp" ? 
url_tab === "docs" @@ -1662,455 +1601,383 @@ export function Sidebar({ "opportunities" ? "career_opportunities" : "career" - : pathname === - "/math" - ? "math_landing" - : pathname.startsWith( - "/math/bridge", - ) || - pathname.startsWith( - "/math-bridge", - ) - ? url_tab === - "core_numeracy" - ? "math_bridge_core_numeracy" + : pathname.startsWith( + "/career-foundations", + ) + ? url_tab === + "msc_refresh" + ? "career_foundations_msc" + : url_tab === + "assessment" + ? "career_foundations_assessment" : url_tab === - "high_school" - ? "math_bridge_high_school" + "ai_labs" + ? "career_foundations_ai_labs" : url_tab === - "pre_university" - ? "math_bridge_pre_university" - : url_tab === - "engineering_prep" - ? "math_bridge_engineering_prep" - : "math_bridge" - : pathname.startsWith( - "/math/refresh", - ) || - pathname.startsWith( - "/math-refresh", - ) - ? "math_refresh_active" - : pathname.startsWith( - "/career-foundations", - ) - ? url_tab === - "msc_refresh" - ? "career_foundations_msc" + "forward_deployed" + ? "career_foundations_fde" : url_tab === - "assessment" - ? "career_foundations_assessment" + "cloud_consulting" + ? "career_foundations_cloud" : url_tab === - "ai_labs" - ? "career_foundations_ai_labs" + "french_enterprise" + ? "career_foundations_french" : url_tab === - "forward_deployed" - ? "career_foundations_fde" + "adtech" + ? "career_foundations_adtech" : url_tab === - "cloud_consulting" - ? "career_foundations_cloud" + "faang" + ? "career_foundations_faang" : url_tab === - "french_enterprise" - ? "career_foundations_french" + "data_platform" + ? "career_foundations_platform" : url_tab === - "adtech" - ? "career_foundations_adtech" - : url_tab === - "faang" - ? "career_foundations_faang" - : url_tab === - "data_platform" - ? "career_foundations_platform" - : url_tab === - "supply_chain" - ? "career_foundations_supply_chain" - : "career_foundations" + "supply_chain" + ? 
"career_foundations_supply_chain" + : "career_foundations" + : pathname.startsWith( + "/high-performance", + ) + ? "high_performance" + : pathname.startsWith( + "/product-lab", + ) + ? "product_lab" + : pathname.startsWith( + "/apps", + ) + ? "my_apps" : pathname.startsWith( - "/high-performance", + "/brand-studio", ) - ? "high_performance" + ? url_tab === + "collections" + ? "brand_studio_collections" + : "brand_studio_assets" : pathname.startsWith( - "/product-lab", + "/events", ) - ? "product_lab" + ? "events" : pathname.startsWith( - "/apps", + "/reference-tracks", ) - ? "my_apps" + ? "reference_tracks" : pathname.startsWith( - "/brand-studio", + "/oss-projects", ) - ? url_tab === - "collections" - ? "brand_studio_collections" - : "brand_studio_assets" + ? "oss_projects" : pathname.startsWith( - "/events", + "/hf-projects", ) - ? "events" + ? "hf_projects" : pathname.startsWith( - "/reference-tracks", + "/elite-toolbox", ) - ? "reference_tracks" + ? "elite_toolbox" : pathname.startsWith( - "/oss-projects", + "/dev-ref", + ) + ? get_dev_ref_view( + url_tab, ) - ? "oss_projects" : pathname.startsWith( - "/hf-projects", + "/prep", + ) + ? get_prep_view( + url_tab, ) - ? "hf_projects" : pathname.startsWith( - "/elite-toolbox", + "/applied-systems", ) - ? "elite_toolbox" + ? url_tab === + "dataops" + ? "applied_systems_dataops" + : url_tab === + "recsys" + ? "applied_systems_recsys" + : url_tab === + "evals" + ? "applied_systems_evals" + : url_tab === + "worldmodels" + ? "applied_systems_worldmodels" + : url_tab === + "3d_vision" + ? "applied_systems_3d_vision" + : url_tab === + "distributed_ml" + ? "applied_systems_distributed_ml" + : "applied_systems_llmops" : pathname.startsWith( - "/dev-ref", - ) - ? get_dev_ref_view( - url_tab, + "/embodied-ai", ) + ? url_tab === + "humanoid" + ? "embodied_ai_humanoid" + : url_tab === + "service" + ? "embodied_ai_service" + : url_tab === + "autonomous" + ? "embodied_ai_autonomous" + : url_tab === + "agentic" + ? 
"embodied_ai_agentic" + : url_tab === + "edge_inference" + ? "embodied_ai_edge_inference" + : url_tab === + "world_models" + ? "embodied_ai_world_models" + : "embodied_ai_core" : pathname.startsWith( - "/prep", - ) - ? get_prep_view( - url_tab, + "/ai-engineering", ) + ? url_tab === + "agents" + ? "ai_engineering_agents" + : url_tab === + "evals" + ? "ai_engineering_evals" + : url_tab === + "retrieval" + ? "ai_engineering_retrieval" + : url_tab === + "memory" + ? "ai_engineering_memory" + : url_tab === + "fine_tuning" + ? "ai_engineering_fine_tuning" + : url_tab === + "multimodal" + ? "ai_engineering_multimodal" + : url_tab === + "reasoning" + ? "ai_engineering_reasoning" + : "ai_engineering_inference" : pathname.startsWith( - "/applied-systems", + "/frontend-eng", ) ? url_tab === - "dataops" - ? "applied_systems_dataops" + "components" + ? "frontend_eng_components" : url_tab === - "recsys" - ? "applied_systems_recsys" + "data_layer" + ? "frontend_eng_data_layer" : url_tab === - "evals" - ? "applied_systems_evals" + "performance" + ? "frontend_eng_performance" : url_tab === - "worldmodels" - ? "applied_systems_worldmodels" + "typescript" + ? "frontend_eng_typescript" : url_tab === - "3d_vision" - ? "applied_systems_3d_vision" + "testing" + ? "frontend_eng_testing" : url_tab === - "distributed_ml" - ? "applied_systems_distributed_ml" - : "applied_systems_llmops" + "architecture" + ? "frontend_eng_architecture" + : "frontend_eng_state" : pathname.startsWith( - "/embodied-ai", + "/agents", ) ? url_tab === - "humanoid" - ? "embodied_ai_humanoid" - : url_tab === - "service" - ? "embodied_ai_service" - : url_tab === - "autonomous" - ? "embodied_ai_autonomous" - : url_tab === - "agentic" - ? "embodied_ai_agentic" - : url_tab === - "edge_inference" - ? "embodied_ai_edge_inference" - : url_tab === - "world_models" - ? "embodied_ai_world_models" - : "embodied_ai_core" + "resources" + ? 
"agents_resources" + : "agents_roadmap" : pathname.startsWith( - "/ai-engineering", + "/tech-radar", ) ? url_tab === - "agents" - ? "ai_engineering_agents" + "blogs" + ? "tech_radar_blogs" : url_tab === - "evals" - ? "ai_engineering_evals" - : url_tab === - "retrieval" - ? "ai_engineering_retrieval" - : url_tab === - "memory" - ? "ai_engineering_memory" - : url_tab === - "fine_tuning" - ? "ai_engineering_fine_tuning" - : url_tab === - "multimodal" - ? "ai_engineering_multimodal" - : url_tab === - "reasoning" - ? "ai_engineering_reasoning" - : "ai_engineering_inference" + "tools" + ? "tech_radar_tools" + : "tech_radar_strategy" : pathname.startsWith( - "/frontend-eng", + "/mcp", ) ? url_tab === - "components" - ? "frontend_eng_components" - : url_tab === - "data_layer" - ? "frontend_eng_data_layer" - : url_tab === - "performance" - ? "frontend_eng_performance" - : url_tab === - "typescript" - ? "frontend_eng_typescript" - : url_tab === - "testing" - ? "frontend_eng_testing" - : url_tab === - "architecture" - ? "frontend_eng_architecture" - : "frontend_eng_state" + "docs" + ? "mcp_docs" + : "mcp_dashboard" : pathname.startsWith( - "/agents", + "/skills", ) - ? url_tab === - "resources" - ? "agents_resources" - : "agents_roadmap" + ? "skills" : pathname.startsWith( - "/tech-radar", + "/gpu-for-ai", ) ? url_tab === - "blogs" - ? "tech_radar_blogs" + "cuda" + ? "gpu_for_ai_cuda" : url_tab === - "tools" - ? "tech_radar_tools" - : "tech_radar_strategy" + "distributed" + ? "gpu_for_ai_distributed" + : url_tab === + "memory" + ? "gpu_for_ai_memory" + : url_tab === + "cloud" + ? "gpu_for_ai_cloud" + : url_tab === + "profiling" + ? "gpu_for_ai_profiling" + : url_tab === + "networking" + ? "gpu_for_ai_networking" + : url_tab === + "alternatives" + ? "gpu_for_ai_alternatives" + : "gpu_for_ai_architecture" : pathname.startsWith( - "/mcp", + "/bio-augmentation", ) ? url_tab === - "docs" - ? "mcp_docs" - : "mcp_dashboard" + "neurotech" + ? 
"bio_augmentation_neurotech" + : url_tab === + "wearables" + ? "bio_augmentation_wearables" + : url_tab === + "biohacking" + ? "bio_augmentation_biohacking" + : url_tab === + "translation" + ? "bio_augmentation_translation" + : url_tab === + "convergence" + ? "bio_augmentation_convergence" + : "bio_augmentation_foundations" : pathname.startsWith( - "/skills", + "/culture-generale", ) - ? "skills" + ? url_track === + "humanites" + ? url_tab === + "french_philo" + ? "culture_generale_humanites_french_philo" + : url_tab === + "literature" + ? "culture_generale_humanites_literature" + : "culture_generale_humanites_philo_science" + : url_track === + "sciences_sociales" + ? url_tab === + "political_philo" + ? "culture_generale_sciences_sociales_political_philo" + : url_tab === + "history" + ? "culture_generale_sciences_sociales_history" + : "culture_generale_sciences_sociales_economics" + : url_tab === + "information_theory" + ? "culture_generale_sciences_information_theory" + : url_tab === + "biology_neuro" + ? "culture_generale_sciences_biology_neuro" + : "culture_generale_sciences_physics" : pathname.startsWith( - "/gpu-for-ai", + "/cognitive-toolkit", ) ? url_tab === - "cuda" - ? "gpu_for_ai_cuda" + "operating_system" + ? "cognitive_toolkit_operating_system" : url_tab === - "distributed" - ? "gpu_for_ai_distributed" + "techniques" + ? "cognitive_toolkit_techniques" : url_tab === - "memory" - ? "gpu_for_ai_memory" + "worldview" + ? "cognitive_toolkit_worldview" : url_tab === - "cloud" - ? "gpu_for_ai_cloud" + "library" + ? "cognitive_toolkit_library" : url_tab === - "profiling" - ? "gpu_for_ai_profiling" + "playbook" + ? "cognitive_toolkit_playbook" : url_tab === - "networking" - ? "gpu_for_ai_networking" + "operators" + ? "cognitive_toolkit_operators" : url_tab === - "alternatives" - ? "gpu_for_ai_alternatives" - : "gpu_for_ai_architecture" + "social_dynamics" + ? "cognitive_toolkit_social_dynamics" + : url_tab === + "ai_leverage" + ? 
"cognitive_toolkit_ai_leverage" + : "cognitive_toolkit_foundation" : pathname.startsWith( - "/bio-augmentation", + "/behavioral-design", ) ? url_tab === - "neurotech" - ? "bio_augmentation_neurotech" + "feed_design" + ? "behavioral_design_feed_design" : url_tab === - "wearables" - ? "bio_augmentation_wearables" + "social_loops" + ? "behavioral_design_social_loops" : url_tab === - "biohacking" - ? "bio_augmentation_biohacking" - : url_tab === - "translation" - ? "bio_augmentation_translation" - : url_tab === - "convergence" - ? "bio_augmentation_convergence" - : "bio_augmentation_foundations" - : pathname.startsWith( - "/math-refresh", - ) - ? url_track === - "prepa_ml" - ? url_tab === - "linear_algebra" - ? "math_refresh_pml_linear_algebra" + "variable_rewards" + ? "behavioral_design_variable_rewards" : url_tab === - "analysis" - ? "math_refresh_pml_analysis" + "friction" + ? "behavioral_design_friction" : url_tab === - "probability" - ? "math_refresh_pml_probability" + "notifications" + ? "behavioral_design_notifications" : url_tab === - "applied_ml" - ? "math_refresh_pml_applied_ml" + "gamification" + ? "behavioral_design_gamification" : url_tab === - "geometry_3d" - ? "math_refresh_pml_geometry_3d" - : url_tab === - "dynamics_physics" - ? "math_refresh_pml_dynamics_physics" - : url_tab === - "evaluation" - ? "math_refresh_pml_evaluation" - : "math_refresh_pml_methode" + "case_studies" + ? "behavioral_design_case_studies" + : "behavioral_design_frameworks" + : pathname.startsWith( + "/elite-freelance", + ) + ? url_tab === + "realtime_systems" + ? "elite_freelance_realtime_systems" : url_tab === - "diagnostic" - ? "math_refresh_z2o_diagnostic" + "apis_at_scale" + ? "elite_freelance_apis_at_scale" : url_tab === - "college" - ? "math_refresh_z2o_college" + "ai_agent_infra" + ? "elite_freelance_ai_agent_infra" : url_tab === - "lycee" - ? "math_refresh_z2o_lycee" + "production_hardening" + ? "elite_freelance_production_hardening" : url_tab === - "terminale" - ? 
"math_refresh_z2o_terminale" - : url_tab === - "evaluation" - ? "math_refresh_z2o_evaluation" - : "math_refresh_z2o_methode" + "positioning" + ? "elite_freelance_positioning" + : "elite_freelance_realtime_systems" : pathname.startsWith( - "/culture-generale", + "/tooling", ) - ? url_track === - "humanites" - ? url_tab === - "french_philo" - ? "culture_generale_humanites_french_philo" - : url_tab === - "literature" - ? "culture_generale_humanites_literature" - : "culture_generale_humanites_philo_science" - : url_track === - "sciences_sociales" - ? url_tab === - "political_philo" - ? "culture_generale_sciences_sociales_political_philo" - : url_tab === - "history" - ? "culture_generale_sciences_sociales_history" - : "culture_generale_sciences_sociales_economics" - : url_tab === - "information_theory" - ? "culture_generale_sciences_information_theory" - : url_tab === - "biology_neuro" - ? "culture_generale_sciences_biology_neuro" - : "culture_generale_sciences_physics" - : pathname.startsWith( - "/cognitive-toolkit", - ) - ? url_tab === - "operating_system" - ? "cognitive_toolkit_operating_system" - : url_tab === - "techniques" - ? "cognitive_toolkit_techniques" - : url_tab === - "worldview" - ? "cognitive_toolkit_worldview" - : url_tab === - "library" - ? "cognitive_toolkit_library" - : url_tab === - "playbook" - ? "cognitive_toolkit_playbook" - : url_tab === - "operators" - ? "cognitive_toolkit_operators" - : url_tab === - "social_dynamics" - ? "cognitive_toolkit_social_dynamics" - : url_tab === - "ai_leverage" - ? "cognitive_toolkit_ai_leverage" - : "cognitive_toolkit_foundation" - : pathname.startsWith( - "/behavioral-design", - ) - ? url_tab === - "feed_design" - ? "behavioral_design_feed_design" - : url_tab === - "social_loops" - ? "behavioral_design_social_loops" - : url_tab === - "variable_rewards" - ? "behavioral_design_variable_rewards" - : url_tab === - "friction" - ? "behavioral_design_friction" - : url_tab === - "notifications" - ? 
"behavioral_design_notifications" - : url_tab === - "gamification" - ? "behavioral_design_gamification" - : url_tab === - "case_studies" - ? "behavioral_design_case_studies" - : "behavioral_design_frameworks" - : pathname.startsWith( - "/elite-freelance", + ? "tooling" + : pathname === + "/learn" + ? "learn_concepts" + : pathname === + "/learn/tracks" + ? "curriculum_tracks" + : pathname.match( + /^\/learn\/tracks\/[^/]+\/modules\//, ) - ? url_tab === - "realtime_systems" - ? "elite_freelance_realtime_systems" - : url_tab === - "apis_at_scale" - ? "elite_freelance_apis_at_scale" - : url_tab === - "ai_agent_infra" - ? "elite_freelance_ai_agent_infra" - : url_tab === - "production_hardening" - ? "elite_freelance_production_hardening" - : url_tab === - "positioning" - ? "elite_freelance_positioning" - : "elite_freelance_realtime_systems" - : pathname.startsWith( - "/tooling", + ? "curriculum_module_detail" + : pathname.match( + /^\/learn\/tracks\/[^/]+$/, ) - ? "tooling" + ? "curriculum_track_detail" : pathname === - "/learn" - ? "learn_concepts" + "/learn/lenses" + ? "learn_lenses" : pathname === - "/learn/tracks" - ? "curriculum_tracks" - : pathname.match( - /^\/learn\/tracks\/[^/]+\/modules\//, + "/learn/levels" + ? "learn_levels" + : pathname.startsWith( + "/learn/", ) - ? "curriculum_module_detail" - : pathname.match( - /^\/learn\/tracks\/[^/]+$/, - ) - ? "curriculum_track_detail" - : pathname === - "/learn/lenses" - ? "learn_lenses" - : pathname === - "/learn/levels" - ? "learn_levels" - : pathname.startsWith( - "/learn/", - ) - ? "learn_concept_detail" - : "none"; + ? "learn_concept_detail" + : "none"; const current_conversation_id = active_view === "chat" ? (params.conversationId ?? 
null) : null; @@ -2355,7 +2222,6 @@ export function Sidebar({ active_view.startsWith("curriculum_") || active_view.startsWith("chinese_") || active_view.startsWith("cantonese_") || - active_view.startsWith("math_") || active_view.startsWith("culture_generale_") || active_view.startsWith("cognitive_toolkit_") || active_view.startsWith("behavioral_design_") @@ -2416,16 +2282,6 @@ export function Sidebar({ active={active_view.startsWith("cantonese_")} onClick={() => navigate({ to: "/cantonese" })} /> - navigate({ to: "/math" })} - /> = { linkedin_events: { label: "text-white/50", dot: "bg-white/30" }, dev_reference: { label: "text-white/50", dot: "bg-white/30" }, interview_prep: { label: "text-pink-400/70", dot: "bg-pink-400/50" }, - math_refresh: { label: "text-cyan-400/70", dot: "bg-cyan-400/50" }, chinese: { label: "text-red-400/70", dot: "bg-red-400/50" }, culture_generale: { label: "text-purple-400/70", dot: "bg-purple-400/50" }, applied_systems: { label: "text-indigo-400/70", dot: "bg-indigo-400/50" }, @@ -87,7 +85,6 @@ const WORKFLOW_ICON_BY_ID: Record = { linkedin_events: Users, dev_reference: Code2, interview_prep: GraduationCap, - math_refresh: Calculator, chinese: Languages, culture_generale: BookOpen, applied_systems: Cpu, diff --git a/frontend/src/components/landing/sections/WhyAdrienSection.tsx b/frontend/src/components/landing/sections/WhyAdrienSection.tsx index f1b4f5f..cdadc04 100644 --- a/frontend/src/components/landing/sections/WhyAdrienSection.tsx +++ b/frontend/src/components/landing/sections/WhyAdrienSection.tsx @@ -41,11 +41,6 @@ const BRAND_POINTS: readonly MissionPoint[] = [ ]; const TECH_POINTS: readonly MissionPoint[] = [ - { - goal: "I am doing Maths Zero to One for ML.", - how: "Maths drives fundamentals needed for ML intuition and execution.", - surfaces: ["Maths"], - }, { goal: "I am going deep on Data Engineering and ML Platforms for the technical side.", how: "Applied Systems, Engine, and Dev Ref provide the technical stack, 
references, and implementation tracks.", diff --git a/frontend/src/components/math-exercise/ExerciseCard.tsx b/frontend/src/components/math-exercise/ExerciseCard.tsx deleted file mode 100644 index 0b5f80c..0000000 --- a/frontend/src/components/math-exercise/ExerciseCard.tsx +++ /dev/null @@ -1,274 +0,0 @@ -import { useState, useCallback } from "react"; -import { motion, AnimatePresence } from "motion/react"; -import { Check, X } from "lucide-react"; - -import type { MathExercise, ExerciseResult } from "./exercise-types"; -import { check_qcm, check_numeric, check_expression } from "./answer-check"; -import { MathText } from "./MathRenderer"; -import { MathInput } from "./MathInput"; - -/* ------------------------------------------------------------------ */ -/* Props */ -/* ------------------------------------------------------------------ */ - -interface ExerciseCardProps { - exercise: MathExercise; - index: number; - onResult: (result: ExerciseResult) => void; -} - -/* ------------------------------------------------------------------ */ -/* Component */ -/* ------------------------------------------------------------------ */ - -export function ExerciseCard({ exercise, index, onResult }: ExerciseCardProps) { - const [selected_choice, set_selected_choice] = useState(null); - const [numeric_input, set_numeric_input] = useState(""); - const [expression_input, set_expression_input] = useState(""); - const [feedback, set_feedback] = useState<"correct" | "incorrect" | null>( - null, - ); - const [start_time] = useState(() => Date.now()); - - const submit = useCallback(() => { - if (feedback) return; // already submitted - - let correct = false; - - if (exercise.type === "qcm") { - if (selected_choice === null) return; - correct = check_qcm(selected_choice, exercise.correct_index); - } else if (exercise.type === "numeric") { - if (!numeric_input.trim()) return; - correct = check_numeric( - numeric_input, - exercise.expected, - exercise.tolerance, - ); - } else { - if 
(!expression_input.trim()) return; - correct = check_expression(expression_input, exercise.expected); - } - - set_feedback(correct ? "correct" : "incorrect"); - - // Auto-advance after showing feedback - setTimeout( - () => { - onResult({ - exercise_id: exercise.id, - correct, - time_ms: Date.now() - start_time, - }); - }, - correct ? 1200 : 2500, - ); - }, [ - exercise, - selected_choice, - numeric_input, - expression_input, - feedback, - onResult, - start_time, - ]); - - const can_submit = - feedback === null && - ((exercise.type === "qcm" && selected_choice !== null) || - (exercise.type === "numeric" && numeric_input.trim() !== "") || - (exercise.type === "expression" && expression_input.trim() !== "")); - - return ( - - - {/* Question */} -
-
- - {index + 1} - -
- -
-
-
- - {/* Input area */} -
- {exercise.type === "qcm" && ( - !feedback && set_selected_choice(i)} - /> - )} - - {exercise.type === "numeric" && ( - !feedback && set_numeric_input(e.target.value)} - onKeyDown={(e) => e.key === "Enter" && can_submit && submit()} - placeholder="Entrez votre reponse (ex: 7/12, 0.583, -3)" - disabled={feedback !== null} - className="w-full rounded-xl border border-white/[0.08] bg-white/[0.02] px-4 py-3 text-lg text-[#edf2fb] outline-none placeholder:text-[#8e99ac] disabled:opacity-50" - onFocus={(e) => { - e.currentTarget.style.borderColor = - "color-mix(in srgb, #55cdff 40%, transparent)"; - }} - onBlur={(e) => { - e.currentTarget.style.borderColor = ""; - }} - /> - )} - - {exercise.type === "expression" && ( -
- -
- )} -
- - {/* Submit button */} - {feedback === null && ( - - )} - - {/* Feedback */} - {feedback && ( - -
- {feedback === "correct" ? ( - - ) : ( - - )} - - {feedback === "correct" ? "Correct !" : "Incorrect"} - -
-

- -

-
- )} -
-
- ); -} - -/* ------------------------------------------------------------------ */ -/* QCM choices sub-component */ -/* ------------------------------------------------------------------ */ - -function QcmChoices({ - choices, - selected, - correct_index, - onSelect, -}: { - choices: string[]; - selected: number | null; - correct_index?: number; - onSelect: (index: number) => void; -}) { - return ( -
- {choices.map((choice, i) => { - let cls = "border-white/[0.08] bg-white/[0.02] hover:bg-white/[0.06]"; - let inlineStyle: React.CSSProperties | undefined; - - if (correct_index !== undefined) { - if (i === correct_index) { - cls = "border"; - inlineStyle = { - borderColor: "color-mix(in srgb, #55cdff 30%, transparent)", - background: "color-mix(in srgb, #55cdff 10%, transparent)", - }; - } else if (i === selected && i !== correct_index) { - cls = "border"; - inlineStyle = { - borderColor: "color-mix(in srgb, #eb5757 30%, transparent)", - background: "color-mix(in srgb, #eb5757 10%, transparent)", - }; - } else { - cls = "border-white/[0.04] bg-white/[0.01] opacity-50"; - } - } else if (i === selected) { - cls = "border"; - inlineStyle = { - borderColor: "color-mix(in srgb, #55cdff 30%, transparent)", - background: "color-mix(in srgb, #55cdff 10%, transparent)", - }; - } - - return ( - - ); - })} -
- ); -} diff --git a/frontend/src/components/math-exercise/ExerciseSession.tsx b/frontend/src/components/math-exercise/ExerciseSession.tsx deleted file mode 100644 index 204fbc0..0000000 --- a/frontend/src/components/math-exercise/ExerciseSession.tsx +++ /dev/null @@ -1,239 +0,0 @@ -import { useCallback, useEffect, useRef, useState } from "react"; -import { motion } from "motion/react"; -import { Check, Clock } from "lucide-react"; - -import type { - MathExercise, - ExerciseResult, - SessionResult, - TopicId, -} from "./exercise-types"; -import { ExerciseCard } from "./ExerciseCard"; - -/* ------------------------------------------------------------------ */ -/* Props */ -/* ------------------------------------------------------------------ */ - -interface ExerciseSessionProps { - topic: TopicId; - exercises: MathExercise[]; - onComplete: (result: SessionResult) => void; - onCancel: () => void; -} - -/* ------------------------------------------------------------------ */ -/* Helpers */ -/* ------------------------------------------------------------------ */ - -function shuffle(arr: T[]): T[] { - const copy = [...arr]; - for (let i = copy.length - 1; i > 0; i--) { - const j = Math.floor(Math.random() * (i + 1)); - [copy[i], copy[j]] = [copy[j], copy[i]]; - } - return copy; -} - -function format_time(seconds: number): string { - const m = Math.floor(seconds / 60); - const s = seconds % 60; - return `${m}:${s.toString().padStart(2, "0")}`; -} - -/* ------------------------------------------------------------------ */ -/* Progress bar */ -/* ------------------------------------------------------------------ */ - -function ProgressBar({ current, total }: { current: number; total: number }) { - const pct = total > 0 ? (current / total) * 100 : 0; - return ( -
-
- -
- - {current}/{total} - -
- ); -} - -/* ------------------------------------------------------------------ */ -/* Summary screen */ -/* ------------------------------------------------------------------ */ - -function SummaryScreen({ - result, - onDone, -}: { - result: SessionResult; - onDone: () => void; -}) { - return ( - -
-
-
- -
- -
-

- Session terminee ! -

-

Voici vos resultats.

-
- -
-
-

- {result.total} -

-

Exercices

-
-
-

- {result.accuracy_pct}% -

-

Precision

-
-
-

- {result.correct} -

-

Correct

-
-
-

- {format_time(result.duration_seconds)} -

-

Duree

-
-
- - -
-
-
- ); -} - -/* ------------------------------------------------------------------ */ -/* Main session component */ -/* ------------------------------------------------------------------ */ - -export function ExerciseSession({ - topic, - exercises, - onComplete, - onCancel, -}: ExerciseSessionProps) { - const [shuffled] = useState(() => shuffle(exercises)); - const [current_index, set_current_index] = useState(0); - const [results, set_results] = useState([]); - const [session_result, set_session_result] = useState( - null, - ); - const [elapsed, set_elapsed] = useState(0); - const start_time = useRef(0); - - // Timer - useEffect(() => { - if (session_result) return; - if (start_time.current === 0) start_time.current = Date.now(); - const interval = setInterval(() => { - set_elapsed(Math.floor((Date.now() - start_time.current) / 1000)); - }, 1000); - return () => clearInterval(interval); - }, [session_result]); - - const handle_result = useCallback( - (result: ExerciseResult) => { - const new_results = [...results, result]; - set_results(new_results); - - const next = current_index + 1; - if (next >= shuffled.length) { - // Session complete - const correct = new_results.filter((r) => r.correct).length; - const total = new_results.length; - const duration_seconds = Math.floor( - (Date.now() - start_time.current) / 1000, - ); - const sr: SessionResult = { - topic, - total, - correct, - accuracy_pct: total > 0 ? Math.round((correct / total) * 100) : 0, - duration_seconds, - date: new Date().toISOString().split("T")[0], - results: new_results, - }; - set_session_result(sr); - onComplete(sr); - } else { - set_current_index(next); - } - }, - [results, current_index, shuffled.length, topic, onComplete], - ); - - // Summary screen - if (session_result) { - return ; - } - - const current_exercise = shuffled[current_index]; - if (!current_exercise) return null; - - return ( -
- {/* Top bar */} -
-
- -
-
- - {format_time(elapsed)} -
- -
- - {/* Current exercise */} - -
- ); -} diff --git a/frontend/src/components/math-exercise/MathInput.tsx b/frontend/src/components/math-exercise/MathInput.tsx deleted file mode 100644 index 0bc7596..0000000 --- a/frontend/src/components/math-exercise/MathInput.tsx +++ /dev/null @@ -1,166 +0,0 @@ -import { Suspense, lazy, useEffect, useRef, useState } from "react"; - -/* ------------------------------------------------------------------ */ -/* Lazy MathLive loader */ -/* ------------------------------------------------------------------ */ - -// MathLive is ~1.5MB - we lazy-load it and fall back to a plain text input -// if it fails to load. The mathfield web component is registered globally -// on first import. - -const MathFieldInner = lazy(async () => { - try { - await import("mathlive"); - // Return a wrapper component that uses the web component - return { - default: MathFieldComponent, - }; - } catch { - return { - default: FallbackInput, - }; - } -}); - -/* ------------------------------------------------------------------ */ -/* MathField web component wrapper */ -/* ------------------------------------------------------------------ */ - -function MathFieldComponent({ value, onChange, placeholder }: MathInputProps) { - const container_ref = useRef(null); - const mf_ref = useRef(null); - - // Create the math-field element imperatively to avoid JSX type issues - useEffect(() => { - const container = container_ref.current; - if (!container) return; - - const el = document.createElement("math-field"); - Object.assign(el.style, { - width: "100%", - fontSize: "1.25rem", - padding: "0.75rem 1rem", - borderRadius: "0.75rem", - border: "1px solid rgba(255,255,255,0.08)", - backgroundColor: "rgba(255,255,255,0.02)", - color: "#edf2fb", - outline: "none", - }); - if (placeholder) el.setAttribute("placeholder", placeholder); - container.appendChild(el); - mf_ref.current = el; - - return () => { - container.removeChild(el); - mf_ref.current = null; - }; - }, [placeholder]); - - useEffect(() => { - 
const el = mf_ref.current; - if (!el) return; - - const handle_input = () => { - const mf = el as unknown as { value: string }; - onChange(mf.value ?? ""); - }; - - el.addEventListener("input", handle_input); - return () => el.removeEventListener("input", handle_input); - }, [onChange]); - - useEffect(() => { - const el = mf_ref.current; - if (!el) return; - const mf = el as unknown as { value: string }; - if (mf.value !== value) { - mf.value = value; - } - }, [value]); - - return
; -} - -/* ------------------------------------------------------------------ */ -/* Fallback plain text input */ -/* ------------------------------------------------------------------ */ - -function FallbackInput({ value, onChange, placeholder }: MathInputProps) { - return ( - onChange(e.target.value)} - placeholder={placeholder ?? "Enter LaTeX expression..."} - className="w-full rounded-xl border border-white/[0.08] bg-white/[0.02] px-4 py-3 text-lg text-[#edf2fb] outline-none placeholder:text-[#8e99ac] focus:border-[rgba(94,106,210,0.4)]" - /> - ); -} - -/* ------------------------------------------------------------------ */ -/* MathInput public component */ -/* ------------------------------------------------------------------ */ - -interface MathInputProps { - value: string; - onChange: (value: string) => void; - placeholder?: string; -} - -export function MathInput(props: MathInputProps) { - const [use_fallback, set_use_fallback] = useState(false); - - if (use_fallback) { - return ; - } - - return ( - -
-
- } - > - set_use_fallback(true)}> - - -
- ); -} - -/* ------------------------------------------------------------------ */ -/* Minimal error boundary */ -/* ------------------------------------------------------------------ */ - -import { Component, type ReactNode, type ErrorInfo } from "react"; - -interface ErrorBoundaryProps { - children: ReactNode; - onError: () => void; -} - -interface ErrorBoundaryState { - has_error: boolean; -} - -class ErrorBoundary extends Component { - constructor(props: ErrorBoundaryProps) { - super(props); - this.state = { has_error: false }; - } - - static getDerivedStateFromError(): ErrorBoundaryState { - return { has_error: true }; - } - - // eslint-disable-next-line @typescript-eslint/no-unused-vars - componentDidCatch(_error: Error, _info: ErrorInfo): void { - this.props.onError(); - } - - render(): ReactNode { - if (this.state.has_error) return null; - return this.props.children; - } -} diff --git a/frontend/src/components/math-exercise/PracticeTab.tsx b/frontend/src/components/math-exercise/PracticeTab.tsx deleted file mode 100644 index 385c098..0000000 --- a/frontend/src/components/math-exercise/PracticeTab.tsx +++ /dev/null @@ -1,217 +0,0 @@ -import { useCallback, useState } from "react"; -import { motion } from "motion/react"; -import { BookOpen, Loader2, Target, Trophy, Zap } from "lucide-react"; - -import type { - MathExercise, - SessionResult, - TopicId, - TrackId, -} from "./exercise-types"; -import { TOPIC_METAS } from "./exercise-types"; -import { ExerciseSession } from "./ExerciseSession"; -import { useMathProgress } from "../../hooks/useMathProgress"; - -/* ------------------------------------------------------------------ */ -/* Props */ -/* ------------------------------------------------------------------ */ - -interface PracticeTabProps { - track: TrackId; -} - -/* ------------------------------------------------------------------ */ -/* Mastery badge */ -/* ------------------------------------------------------------------ */ - -const MASTERY_CONFIG = 
{ - not_started: { - label: "Pas commence", - color: "bg-white/[0.06] text-[#8e99ac]", - icon: BookOpen, - }, - learning: { - label: "En cours", - color: "bg-[rgba(94,106,210,0.1)] text-[#5e6ad2]", - icon: Zap, - }, - practicing: { - label: "Pratique", - color: "bg-white/[0.04] text-white/70", - icon: Target, - }, - mastered: { - label: "Maitrise", - color: "bg-[rgba(85,205,255,0.1)] text-[#55cdff]", - icon: Trophy, - }, -} as const; - -/* ------------------------------------------------------------------ */ -/* Main component */ -/* ------------------------------------------------------------------ */ - -export function PracticeTab({ track }: PracticeTabProps) { - const { get_topic_progress, record_session, get_overall_stats } = - useMathProgress(); - const [active_session, set_active_session] = useState<{ - topic: TopicId; - exercises: MathExercise[]; - } | null>(null); - const [loading, set_loading] = useState(false); - - const topics = TOPIC_METAS.filter((t) => t.track === track); - const overall = get_overall_stats(); - - const handle_start = useCallback((topic: TopicId) => { - set_loading(true); - import("./exercise-bank").then((mod) => { - const exercises = mod.get_session_exercises(topic, 10); - set_active_session({ topic, exercises }); - set_loading(false); - }); - }, []); - - const handle_complete = useCallback( - (result: SessionResult) => { - record_session(result); - }, - [record_session], - ); - - const handle_cancel = useCallback(() => { - set_active_session(null); - }, []); - - // Loading state - if (loading) { - return ( -
- -
- ); - } - - // Active session view - if (active_session) { - const exercises = active_session.exercises; - if (exercises.length === 0) { - return ( -
-

- Aucun exercice disponible pour ce sujet. -

- -
- ); - } - return ( - - ); - } - - // Dashboard view - return ( -
- {/* Overall stats */} -
- - - - -
- - {/* Topic grid */} -
-

- Choisissez un sujet -

-
- {topics.map((topic) => { - const progress = get_topic_progress(topic.id); - const config = MASTERY_CONFIG[progress.mastery_level]; - const Icon = config.icon; - const accuracy = - progress.total_attempted > 0 - ? Math.round( - (progress.total_correct / progress.total_attempted) * 100, - ) - : 0; - - return ( - handle_start(topic.id)} - className="flex flex-col gap-3 rounded-xl border border-white/[0.08] bg-white/[0.02] p-4 text-left transition-colors hover:bg-white/[0.04]" - > -
-

{topic.label}

- - - {config.label} - -
- - {progress.total_attempted > 0 ? ( -
-
- - {progress.total_attempted} exercices - - {accuracy}% -
-
-
-
- {progress.last_session_date && ( -

- Derniere session : {progress.last_session_date} -

- )} -
- ) : ( -

- Commencez une session de 10 exercices -

- )} - - ); - })} -
-
-
- ); -} - -/* ------------------------------------------------------------------ */ -/* Stat box */ -/* ------------------------------------------------------------------ */ - -function StatBox({ value, label }: { value: string; label: string }) { - return ( -
-

{value}

-

{label}

-
- ); -} diff --git a/frontend/src/components/math-exercise/answer-check.ts b/frontend/src/components/math-exercise/answer-check.ts deleted file mode 100644 index 78615a9..0000000 --- a/frontend/src/components/math-exercise/answer-check.ts +++ /dev/null @@ -1,140 +0,0 @@ -/* ------------------------------------------------------------------ */ -/* Answer validation logic */ -/* ------------------------------------------------------------------ */ - -import { Parser } from "expr-eval"; - -/** - * Check a QCM answer (simple index comparison). - */ -export function check_qcm(selected: number, correct: number): boolean { - return selected === correct; -} - -/** - * Parse a user input string into a number. - * Supports: integers, decimals, fractions ("7/12"), negative numbers. - * Returns NaN on failure. - */ -export function parse_numeric(input: string): number { - const trimmed = input.trim().replace(/\s/g, ""); - if (trimmed === "") return NaN; - - // Handle fractions: "7/12", "-3/4", "1/3" - const fraction_match = trimmed.match( - /^(-?\d+(?:\.\d+)?)\/(-?\d+(?:\.\d+)?)$/, - ); - if (fraction_match) { - const num = parseFloat(fraction_match[1]); - const den = parseFloat(fraction_match[2]); - if (den === 0) return NaN; - return num / den; - } - - // Handle decimals and integers - const value = parseFloat(trimmed); - return value; -} - -/** - * Check a numeric answer with optional tolerance. - */ -export function check_numeric( - input: string, - expected: number, - tolerance = 0.001, -): boolean { - const parsed = parse_numeric(input); - if (isNaN(parsed)) return false; - return Math.abs(parsed - expected) <= tolerance; -} - -/** - * Normalize a LaTeX expression for comparison. 
- * - Strips whitespace - * - Removes \left and \right - * - Normalizes common patterns - */ -function normalize_latex(expr: string): string { - return expr - .replace(/\\left/g, "") - .replace(/\\right/g, "") - .replace(/\s+/g, "") - .replace(/\{(\w)\}/g, "$1") // {x} -> x - .replace(/\*\*/g, "^") // ** -> ^ - .replace(/\+-/g, "-") // +- -> - - .replace(/-\+/g, "-"); // -+ -> - -} - -/** - * Evaluate a simple LaTeX math expression at a given variable value. - * Supports: +, -, *, /, ^, sqrt, sin, cos, tan, exp, ln, log, pi, e, abs. - * Converts LaTeX to expr-eval-compatible string (no JS execution). - */ -function latex_to_evaluable(latex: string): string { - let s = latex; - // Strip LaTeX formatting - s = s.replace(/\\left/g, "").replace(/\\right/g, ""); - s = s.replace(/\{/g, "(").replace(/\}/g, ")"); - s = s.replace(/\\frac\(([^)]+)\)\(([^)]+)\)/g, "(($1)/($2))"); - s = s.replace(/\\sqrt\(([^)]+)\)/g, "sqrt($1)"); - s = s.replace(/\\sin/g, "sin"); - s = s.replace(/\\cos/g, "cos"); - s = s.replace(/\\tan/g, "tan"); - s = s.replace(/\\exp/g, "exp"); - s = s.replace(/\\ln/g, "log"); - s = s.replace(/\\log/g, "log"); - s = s.replace(/\\pi/g, "PI"); - s = s.replace(/\\abs\(([^)]+)\)/g, "abs($1)"); - s = s.replace(/\|([^|]+)\|/g, "abs($1)"); - // Handle e as Euler's number only when standalone - s = s.replace(/(? 2*x - s = s.replace(/(\d)([a-zA-Z])/g, "$1*$2"); - return s; -} - -const _parser = new Parser(); - -function evaluate_at(expr: string, variable: string, value: number): number { - const parsed_expr = latex_to_evaluable(expr); - try { - return _parser.evaluate(parsed_expr, { [variable]: value }); - } catch { - return NaN; - } -} - -/** - * Check an expression answer by: - * 1. Trying normalized string comparison first - * 2. 
Falling back to point evaluation at multiple random values - */ -export function check_expression(input: string, expected: string): boolean { - // Quick check: normalized string match - if (normalize_latex(input) === normalize_latex(expected)) return true; - - // Point evaluation: try 5 random values for variable x - const test_values = [0.5, 1.0, 1.5, 2.0, 3.0]; - const variable = "x"; - - let all_match = true; - let any_evaluated = false; - - for (const val of test_values) { - const input_result = evaluate_at(input, variable, val); - const expected_result = evaluate_at(expected, variable, val); - - // Skip if either fails to evaluate (may not contain variable) - if (isNaN(input_result) || isNaN(expected_result)) continue; - if (!isFinite(input_result) || !isFinite(expected_result)) continue; - - any_evaluated = true; - if (Math.abs(input_result - expected_result) > 0.01) { - all_match = false; - break; - } - } - - return any_evaluated && all_match; -} diff --git a/frontend/src/components/math-exercise/exercise-bank.ts b/frontend/src/components/math-exercise/exercise-bank.ts deleted file mode 100644 index 88e07da..0000000 --- a/frontend/src/components/math-exercise/exercise-bank.ts +++ /dev/null @@ -1,1499 +0,0 @@ -import type { MathExercise, TopicId } from "./exercise-types"; - -/* ================================================================== */ -/* Exercise Bank */ -/* ~120 exercises across 8 topic blocks */ -/* Mix of QCM, numeric, and expression types per block */ -/* ================================================================== */ - -/* ------------------------------------------------------------------ */ -/* Diagnostic & Arithmetique */ -/* ------------------------------------------------------------------ */ - -const DIAGNOSTIC_EXERCISES: MathExercise[] = [ - { - id: "diag-001", - topic: "diagnostic", - type: "numeric", - difficulty: 1, - question: "Calculez : $3 + 2 \\times (4 - 1)^2$", - expected: 21, - explanation: - "$3 + 2 \\times 9 = 3 + 18 
= 21$. Priorite : parentheses, puissances, multiplication, addition.", - }, - { - id: "diag-002", - topic: "diagnostic", - type: "numeric", - difficulty: 1, - question: "Calculez : $\\frac{7}{12} + \\frac{5}{8}$", - expected: 29 / 24, - tolerance: 0.001, - explanation: - "PPCM(12, 8) = 24. $\\frac{14}{24} + \\frac{15}{24} = \\frac{29}{24}$.", - }, - { - id: "diag-003", - topic: "diagnostic", - type: "qcm", - difficulty: 1, - question: "Quelle est la forme simplifiee de $\\frac{36}{48}$ ?", - choices: [ - "$\\frac{3}{4}$", - "$\\frac{6}{8}$", - "$\\frac{9}{12}$", - "$\\frac{4}{3}$", - ], - correct_index: 0, - explanation: "PGCD(36, 48) = 12. $\\frac{36}{48} = \\frac{3}{4}$.", - }, - { - id: "diag-004", - topic: "diagnostic", - type: "numeric", - difficulty: 1, - question: - "Convertissez $0.375$ en fraction simplifiee. Donnez le numerateur si le denominateur est 8.", - expected: 3, - explanation: "$0.375 = \\frac{375}{1000} = \\frac{3}{8}$. Numerateur = 3.", - }, - { - id: "diag-005", - topic: "diagnostic", - type: "qcm", - difficulty: 1, - question: "Quel est le resultat de $(-3) \\times (-7) + 4 \\times (-2)$ ?", - choices: ["$13$", "$29$", "$-13$", "$-29$"], - correct_index: 0, - explanation: - "$(-3)\\times(-7) = 21$ et $4\\times(-2) = -8$. $21 + (-8) = 13$.", - }, - { - id: "diag-006", - topic: "diagnostic", - type: "numeric", - difficulty: 2, - question: - "Si un article coute 80 euros et est solde a -25%, quel est le prix final en euros ?", - expected: 60, - explanation: - "Reduction : $80 \\times 0.25 = 20$. Prix final : $80 - 20 = 60$ euros.", - }, - { - id: "diag-007", - topic: "diagnostic", - type: "numeric", - difficulty: 2, - question: "Resolvez : $5x - 3 = 2x + 9$. 
Quelle est la valeur de $x$ ?", - expected: 4, - explanation: "$5x - 2x = 9 + 3$, soit $3x = 12$, donc $x = 4$.", - }, - { - id: "diag-008", - topic: "diagnostic", - type: "qcm", - difficulty: 1, - question: "Combien vaut $2^8$ ?", - choices: ["$128$", "$256$", "$512$", "$64$"], - correct_index: 1, - explanation: "$2^8 = 256$.", - }, - { - id: "diag-009", - topic: "diagnostic", - type: "numeric", - difficulty: 2, - question: - "Un train parcourt 240 km en 3 heures. Quelle est sa vitesse en km/h ?", - expected: 80, - explanation: "Vitesse = distance / temps = $240 / 3 = 80$ km/h.", - }, - { - id: "diag-010", - topic: "diagnostic", - type: "qcm", - difficulty: 2, - question: - "Quelle est la pente de la droite passant par $A(1, 3)$ et $B(4, 9)$ ?", - choices: ["$2$", "$3$", "$\\frac{1}{2}$", "$6$"], - correct_index: 0, - explanation: "Pente $= \\frac{9-3}{4-1} = \\frac{6}{3} = 2$.", - }, - { - id: "diag-011", - topic: "diagnostic", - type: "numeric", - difficulty: 1, - question: "Calculez : $15 \\times 15$", - expected: 225, - explanation: "$15^2 = 225$.", - }, - { - id: "diag-012", - topic: "diagnostic", - type: "qcm", - difficulty: 2, - question: - "Si $\\frac{x}{3} = \\frac{12}{9}$, quelle est la valeur de $x$ ?", - choices: ["$4$", "$3$", "$6$", "$36$"], - correct_index: 0, - explanation: "Produit en croix : $9x = 36$, donc $x = 4$.", - }, - { - id: "diag-013", - topic: "diagnostic", - type: "numeric", - difficulty: 2, - question: - "Decomposez $360$ en produit de facteurs premiers. Combien de facteurs 2 y a-t-il ?", - expected: 3, - explanation: "$360 = 2^3 \\times 3^2 \\times 5$. Il y a 3 facteurs 2.", - }, - { - id: "diag-014", - topic: "diagnostic", - type: "expression", - difficulty: 2, - question: - "Resolvez l'inequation $2x - 5 > 3$. Exprimez $x$ sous la forme $x > a$. 
Quel est $a$ ?", - expected: "4", - explanation: "$2x > 8$, donc $x > 4$.", - }, - { - id: "diag-015", - topic: "diagnostic", - type: "numeric", - difficulty: 1, - question: "Quel pourcentage represente 45 sur 180 ?", - expected: 25, - explanation: "$\\frac{45}{180} = 0.25 = 25\\%$.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* College (6e-3e) */ -/* ------------------------------------------------------------------ */ - -const COLLEGE_EXERCISES: MathExercise[] = [ - { - id: "coll-001", - topic: "college", - type: "expression", - difficulty: 1, - question: "Developpez $(x + 3)^2$.", - expected: "x^2+6x+9", - explanation: "$(a+b)^2 = a^2 + 2ab + b^2$. Ici : $x^2 + 6x + 9$.", - }, - { - id: "coll-002", - topic: "college", - type: "expression", - difficulty: 2, - question: "Factorisez $x^2 - 9$.", - expected: "(x-3)(x+3)", - explanation: - "Identite remarquable $a^2 - b^2 = (a-b)(a+b)$. $x^2 - 9 = (x-3)(x+3)$.", - }, - { - id: "coll-003", - topic: "college", - type: "numeric", - difficulty: 2, - question: - "Dans un triangle rectangle, les cotes de l'angle droit mesurent 3 cm et 4 cm. Quelle est l'hypotenuse ?", - expected: 5, - explanation: "Pythagore : $c = \\sqrt{3^2 + 4^2} = \\sqrt{25} = 5$.", - }, - { - id: "coll-004", - topic: "college", - type: "qcm", - difficulty: 2, - question: - "Deux droites paralleles sont coupees par une secante. Si les segments interceptes sont 4, 6, 3, $x$. Par Thales, $x$ vaut :", - choices: ["$4.5$", "$2$", "$8$", "$3.5$"], - correct_index: 0, - explanation: - "Par Thales : $\\frac{4}{6} = \\frac{3}{x}$, donc $x = \\frac{6 \\times 3}{4} = 4.5$.", - }, - { - id: "coll-005", - topic: "college", - type: "numeric", - difficulty: 1, - question: - "Simplifiez : $3a + 2b - a + 5b$. Quel est le coefficient de $a$ ?", - expected: 2, - explanation: - "$3a - a = 2a$ et $2b + 5b = 7b$, donc $2a + 7b$. 
Coefficient de $a$ = 2.", - }, - { - id: "coll-006", - topic: "college", - type: "expression", - difficulty: 2, - question: "Factorisez $4x^2 + 12x + 9$.", - expected: "(2x+3)^2", - explanation: "$(2x)^2 + 2(2x)(3) + 3^2 = (2x+3)^2$.", - }, - { - id: "coll-007", - topic: "college", - type: "qcm", - difficulty: 2, - question: "La fonction $f(x) = 2x - 5$ est :", - choices: ["Affine", "Lineaire", "Constante", "Quadratique"], - correct_index: 0, - explanation: - "$f(x) = ax + b$ avec $a=2, b=-5$ est une fonction affine (lineaire si $b=0$).", - }, - { - id: "coll-008", - topic: "college", - type: "numeric", - difficulty: 2, - question: - "Resolvez le systeme : $x + y = 10$ et $x - y = 4$. Quelle est la valeur de $x$ ?", - expected: 7, - explanation: "Addition : $2x = 14$, donc $x = 7$ (et $y = 3$).", - }, - { - id: "coll-009", - topic: "college", - type: "qcm", - difficulty: 1, - question: - "La mediane d'une serie statistique ordonnee {2, 5, 7, 8, 12} est :", - choices: ["$7$", "$5$", "$8$", "$6.8$"], - correct_index: 0, - explanation: "5 valeurs, la mediane est la 3e valeur = 7.", - }, - { - id: "coll-010", - topic: "college", - type: "numeric", - difficulty: 2, - question: - "On lance un de equilibre a 6 faces. Quelle est la probabilite d'obtenir un nombre pair ? (en fraction decimale)", - expected: 0.5, - explanation: "Nombres pairs : {2, 4, 6}. $P = \\frac{3}{6} = 0.5$.", - }, - { - id: "coll-011", - topic: "college", - type: "expression", - difficulty: 2, - question: "Developpez $(2x - 1)(3x + 4)$.", - expected: "6x^2+5x-4", - explanation: "$6x^2 + 8x - 3x - 4 = 6x^2 + 5x - 4$.", - }, - { - id: "coll-012", - topic: "college", - type: "numeric", - difficulty: 2, - question: "Quel est $\\cos(60°)$ ? 
(donnez la valeur decimale)", - expected: 0.5, - explanation: "$\\cos(60°) = \\frac{1}{2} = 0.5$.", - }, - { - id: "coll-013", - topic: "college", - type: "qcm", - difficulty: 1, - question: "L'ecriture scientifique de $0.00045$ est :", - choices: [ - "$4.5 \\times 10^{-4}$", - "$45 \\times 10^{-5}$", - "$4.5 \\times 10^{-3}$", - "$0.45 \\times 10^{-3}$", - ], - correct_index: 0, - explanation: - "$0.00045 = 4.5 \\times 10^{-4}$. En ecriture scientifique : $a \\times 10^n$ avec $1 \\leq a < 10$.", - }, - { - id: "coll-014", - topic: "college", - type: "numeric", - difficulty: 2, - question: - "La fonction affine $f$ passe par $A(0, -2)$ et $B(3, 4)$. Quel est le coefficient directeur ?", - expected: 2, - explanation: "$a = \\frac{4-(-2)}{3-0} = \\frac{6}{3} = 2$.", - }, - { - id: "coll-015", - topic: "college", - type: "expression", - difficulty: 2, - question: "Factorisez $x^2 - 6x + 9$.", - expected: "(x-3)^2", - explanation: "$(x-3)^2 = x^2 - 6x + 9$. Identite remarquable $(a-b)^2$.", - }, - { - id: "coll-016", - topic: "college", - type: "numeric", - difficulty: 1, - question: "Calculez $\\sqrt{144}$.", - expected: 12, - explanation: "$12 \\times 12 = 144$, donc $\\sqrt{144} = 12$.", - }, - { - id: "coll-017", - topic: "college", - type: "qcm", - difficulty: 2, - question: - "Un triangle a des cotes de longueurs 5, 12 et 13. Est-il rectangle ?", - choices: [ - "Oui, car $5^2 + 12^2 = 13^2$", - "Non", - "On ne peut pas savoir", - "Oui, car $5 + 12 = 17$", - ], - correct_index: 0, - explanation: - "$25 + 144 = 169 = 13^2$. 
La reciproque de Pythagore confirme que le triangle est rectangle.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Lycee (Seconde-Premiere) */ -/* ------------------------------------------------------------------ */ - -const LYCEE_EXERCISES: MathExercise[] = [ - { - id: "lyc-001", - topic: "lycee", - type: "qcm", - difficulty: 2, - question: "La fonction $f(x) = \\frac{1}{x}$ est definie sur :", - choices: [ - "$\\mathbb{R}^*$", - "$\\mathbb{R}$", - "$\\mathbb{R}^+$", - "$[0, +\\infty[$", - ], - correct_index: 0, - explanation: - "La fonction inverse est definie pour tout $x \\neq 0$, soit $\\mathbb{R}^*$.", - }, - { - id: "lyc-002", - topic: "lycee", - type: "expression", - difficulty: 2, - question: "Derivez $f(x) = 3x^2 - 4x + 1$.", - expected: "6x-4", - explanation: "$f'(x) = 6x - 4$.", - }, - { - id: "lyc-003", - topic: "lycee", - type: "numeric", - difficulty: 2, - question: - "La derivee de $f(x) = x^3 - 3x$ vaut 0 en $x = 1$. Quelle est la valeur de $f(1)$ ?", - expected: -2, - explanation: "$f(1) = 1 - 3 = -2$.", - }, - { - id: "lyc-004", - topic: "lycee", - type: "qcm", - difficulty: 2, - question: "La suite $u_n = 3 + 2n$ est :", - choices: [ - "Arithmetique de raison 2", - "Geometrique de raison 2", - "Arithmetique de raison 3", - "Ni l'un ni l'autre", - ], - correct_index: 0, - explanation: - "$u_{n+1} - u_n = 2$, c'est une suite arithmetique de raison 2.", - }, - { - id: "lyc-005", - topic: "lycee", - type: "numeric", - difficulty: 2, - question: - "Quelle est la somme des 100 premiers termes de la suite $u_n = 2n + 1$ (pour $n = 0, 1, ..., 99$) ?", - expected: 10000, - explanation: - "$S = \\sum_{n=0}^{99}(2n+1) = 2\\frac{99 \\times 100}{2} + 100 = 9900 + 100 = 10000$.", - }, - { - id: "lyc-006", - topic: "lycee", - type: "qcm", - difficulty: 2, - question: "La valeur de $\\sin(\\frac{\\pi}{6})$ est :", - choices: [ - "$\\frac{1}{2}$", - "$\\frac{\\sqrt{2}}{2}$", - "$\\frac{\\sqrt{3}}{2}$", - "$1$", - ], - 
correct_index: 0, - explanation: "$\\sin(\\frac{\\pi}{6}) = \\sin(30°) = \\frac{1}{2}$.", - }, - { - id: "lyc-007", - topic: "lycee", - type: "expression", - difficulty: 2, - question: - "Trouvez l'equation de la tangente a $f(x) = x^2$ au point $x = 3$.", - expected: "6x-9", - explanation: - "$f'(x) = 2x$, $f'(3) = 6$, $f(3) = 9$. Tangente : $y = 6(x-3) + 9 = 6x - 9$.", - }, - { - id: "lyc-008", - topic: "lycee", - type: "numeric", - difficulty: 2, - question: "La suite geometrique $u_n = 3 \\times 2^n$. Quelle est $u_5$ ?", - expected: 96, - explanation: "$u_5 = 3 \\times 2^5 = 3 \\times 32 = 96$.", - }, - { - id: "lyc-009", - topic: "lycee", - type: "expression", - difficulty: 3, - question: "Derivez $f(x) = (2x+1)^3$.", - expected: "6(2x+1)^2", - explanation: "Composee : $f'(x) = 3(2x+1)^2 \\times 2 = 6(2x+1)^2$.", - }, - { - id: "lyc-010", - topic: "lycee", - type: "numeric", - difficulty: 2, - question: "Resolvez $|2x - 3| = 5$. Donnez la plus grande solution.", - expected: 4, - explanation: - "$2x - 3 = 5 \\Rightarrow x = 4$ ou $2x - 3 = -5 \\Rightarrow x = -1$. 
Plus grande : 4.", - }, - { - id: "lyc-011", - topic: "lycee", - type: "qcm", - difficulty: 2, - question: - "Le produit scalaire $\\vec{u} \\cdot \\vec{v}$ avec $\\vec{u}(3, 1)$ et $\\vec{v}(2, -4)$ vaut :", - choices: ["$2$", "$10$", "$-2$", "$6$"], - correct_index: 0, - explanation: - "$\\vec{u} \\cdot \\vec{v} = 3 \\times 2 + 1 \\times (-4) = 6 - 4 = 2$.", - }, - { - id: "lyc-012", - topic: "lycee", - type: "numeric", - difficulty: 2, - question: - "Calculez $\\cos(\\frac{\\pi}{4})$ (valeur decimale arrondie au millieme).", - expected: 0.707, - tolerance: 0.001, - explanation: - "$\\cos(\\frac{\\pi}{4}) = \\frac{\\sqrt{2}}{2} \\approx 0.707$.", - }, - { - id: "lyc-013", - topic: "lycee", - type: "expression", - difficulty: 2, - question: "Derivez $f(x) = \\frac{x}{x+1}$.", - expected: "\\frac{1}{(x+1)^2}", - explanation: - "Quotient : $f'(x) = \\frac{(x+1) - x}{(x+1)^2} = \\frac{1}{(x+1)^2}$.", - }, - { - id: "lyc-014", - topic: "lycee", - type: "numeric", - difficulty: 3, - question: - "La suite $(u_n)$ est definie par $u_0 = 1$ et $u_{n+1} = \\frac{u_n}{2} + 1$. Quelle est $u_2$ ?", - expected: 1.75, - explanation: - "$u_1 = \\frac{1}{2} + 1 = 1.5$, $u_2 = \\frac{1.5}{2} + 1 = 1.75$.", - }, - { - id: "lyc-015", - topic: "lycee", - type: "expression", - difficulty: 3, - question: "Derivez $f(x) = x\\sqrt{x}$ (ecrivez comme $f(x) = x^{3/2}$).", - expected: "\\frac{3}{2}\\sqrt{x}", - explanation: - "$f(x) = x^{3/2}$, $f'(x) = \\frac{3}{2}x^{1/2} = \\frac{3}{2}\\sqrt{x}$.", - }, - { - id: "lyc-016", - topic: "lycee", - type: "expression", - difficulty: 2, - question: - "Resolvez $\\cos(x) = \\frac{1}{2}$ sur $[0, 2\\pi]$. La plus petite solution est $x = $", - expected: "\\frac{\\pi}{3}", - explanation: - "$\\cos(x) = \\frac{1}{2}$ donne $x = \\frac{\\pi}{3}$ et $x = \\frac{5\\pi}{3}$ sur $[0, 2\\pi]$.", - }, - { - id: "lyc-017", - topic: "lycee", - type: "numeric", - difficulty: 2, - question: - "Calculez la variance de la serie {2, 4, 4, 4, 5, 5, 7, 9}. 
Moyenne = 5.", - expected: 4, - explanation: - "$V = \\frac{1}{8}[(2-5)^2 + 3(4-5)^2 + 2(5-5)^2 + (7-5)^2 + (9-5)^2] = \\frac{9+3+0+4+16}{8} = 4$.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Terminale S */ -/* ------------------------------------------------------------------ */ - -const TERMINALE_EXERCISES: MathExercise[] = [ - { - id: "term-001", - topic: "terminale", - type: "expression", - difficulty: 2, - question: - "Calculez $\\lim_{x \\to +\\infty} \\frac{3x^2 - x + 1}{x^2 + 2}$.", - expected: "3", - explanation: - "On divise par $x^2$ : $\\frac{3 - 1/x + 1/x^2}{1 + 2/x^2} \\to 3$.", - }, - { - id: "term-002", - topic: "terminale", - type: "expression", - difficulty: 2, - question: "Calculez la primitive de $f(x) = 2x \\cdot e^{x^2}$.", - expected: "e^{x^2}", - explanation: "Si $F'(x) = 2x \\cdot e^{x^2}$, alors $F(x) = e^{x^2} + C$.", - }, - { - id: "term-003", - topic: "terminale", - type: "numeric", - difficulty: 2, - question: "Calculez $\\int_0^1 2x \\, dx$.", - expected: 1, - explanation: "$\\int_0^1 2x \\, dx = [x^2]_0^1 = 1 - 0 = 1$.", - }, - { - id: "term-004", - topic: "terminale", - type: "qcm", - difficulty: 2, - question: "La derivee de $e^{3x}$ est :", - choices: ["$3e^{3x}$", "$e^{3x}$", "$3xe^{3x-1}$", "$e^{3x+1}$"], - correct_index: 0, - explanation: "$(e^{u})' = u' \\cdot e^{u}$ avec $u = 3x$, $u' = 3$.", - }, - { - id: "term-005", - topic: "terminale", - type: "numeric", - difficulty: 2, - question: "Calculez $\\ln(e^5)$.", - expected: 5, - explanation: "$\\ln(e^x) = x$, donc $\\ln(e^5) = 5$.", - }, - { - id: "term-006", - topic: "terminale", - type: "expression", - difficulty: 3, - question: "Derivez $f(x) = x \\ln(x) - x$.", - expected: "\\ln(x)", - explanation: - "$f'(x) = \\ln(x) + x \\cdot \\frac{1}{x} - 1 = \\ln(x) + 1 - 1 = \\ln(x)$.", - }, - { - id: "term-007", - topic: "terminale", - type: "qcm", - difficulty: 2, - question: "Le module du nombre complexe $z = 3 + 4i$ est :", - 
choices: ["$5$", "$7$", "$\\sqrt{7}$", "$25$"], - correct_index: 0, - explanation: "$|z| = \\sqrt{3^2 + 4^2} = \\sqrt{25} = 5$.", - }, - { - id: "term-008", - topic: "terminale", - type: "numeric", - difficulty: 2, - question: "Calculez $\\int_0^{\\pi} \\sin(x) \\, dx$.", - expected: 2, - explanation: - "$\\int_0^{\\pi} \\sin(x) \\, dx = [-\\cos(x)]_0^{\\pi} = -\\cos(\\pi) + \\cos(0) = 1 + 1 = 2$.", - }, - { - id: "term-009", - topic: "terminale", - type: "expression", - difficulty: 3, - question: - "Ecrivez $z = 1 + i$ sous forme exponentielle $r e^{i\\theta}$. Quel est $r$ ?", - expected: "\\sqrt{2}", - explanation: - "$|z| = \\sqrt{1+1} = \\sqrt{2}$ et $\\theta = \\frac{\\pi}{4}$. Donc $z = \\sqrt{2}e^{i\\pi/4}$.", - }, - { - id: "term-010", - topic: "terminale", - type: "qcm", - difficulty: 2, - question: "$\\lim_{x \\to 0} \\frac{e^x - 1}{x}$ vaut :", - choices: ["$1$", "$0$", "$e$", "$+\\infty$"], - correct_index: 0, - explanation: - "Limite classique : $\\lim_{x \\to 0} \\frac{e^x - 1}{x} = 1$.", - }, - { - id: "term-011", - topic: "terminale", - type: "numeric", - difficulty: 3, - question: - "Calculez l'aire entre la courbe $y = x^2$ et l'axe des $x$ sur $[0, 2]$.", - expected: 8 / 3, - tolerance: 0.01, - explanation: - "$\\int_0^2 x^2 \\, dx = [\\frac{x^3}{3}]_0^2 = \\frac{8}{3} \\approx 2.667$.", - }, - { - id: "term-012", - topic: "terminale", - type: "expression", - difficulty: 2, - question: "Resolvez l'equation differentielle $y' = 2y$ avec $y(0) = 3$.", - expected: "3e^{2x}", - explanation: - "Solution : $y = Ce^{2x}$. 
Avec $y(0) = 3$ : $C = 3$, donc $y = 3e^{2x}$.", - }, - { - id: "term-013", - topic: "terminale", - type: "qcm", - difficulty: 2, - question: "La forme exponentielle de $z = -1$ est :", - choices: ["$e^{i\\pi}$", "$e^{-i\\pi}$", "$e^{i\\pi/2}$", "$-e^{0}$"], - correct_index: 0, - explanation: "$-1 = e^{i\\pi}$ (formule d'Euler : $e^{i\\pi} + 1 = 0$).", - }, - { - id: "term-014", - topic: "terminale", - type: "numeric", - difficulty: 2, - question: "Calculez $(1+i)(1-i)$ (partie reelle du resultat).", - expected: 2, - explanation: "$(1+i)(1-i) = 1 - i^2 = 1 + 1 = 2$.", - }, - { - id: "term-015", - topic: "terminale", - type: "numeric", - difficulty: 3, - question: - "Avec $X \\sim \\mathcal{N}(0,1)$, $P(X \\leq 1.96) \\approx$ ? (arrondi au centieme)", - expected: 0.975, - tolerance: 0.005, - explanation: "Table de la loi normale : $P(X \\leq 1.96) \\approx 0.975$.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Algebre Lineaire (Prepa ML) */ -/* ------------------------------------------------------------------ */ - -const LINEAR_ALGEBRA_EXERCISES: MathExercise[] = [ - { - id: "linalg-001", - topic: "linear_algebra", - type: "qcm", - difficulty: 2, - question: - "Parmi ces ensembles, lequel est un sous-espace vectoriel de $\\mathbb{R}^2$ ?", - choices: [ - "$\\{(x,y) : x + y = 0\\}$", - "$\\{(x,y) : x + y = 1\\}$", - "$\\{(x,y) : x \\geq 0\\}$", - "$\\{(x,y) : xy = 0\\}$", - ], - correct_index: 0, - explanation: - "Seul $x + y = 0$ passe par l'origine et est stable par combinaison lineaire.", - }, - { - id: "linalg-002", - topic: "linear_algebra", - type: "numeric", - difficulty: 2, - question: - "Calculez le determinant de $A = \\begin{pmatrix} 2 & 3 \\\\ 1 & 4 \\end{pmatrix}$.", - expected: 5, - explanation: "$\\det(A) = 2 \\times 4 - 3 \\times 1 = 8 - 3 = 5$.", - }, - { - id: "linalg-003", - topic: "linear_algebra", - type: "numeric", - difficulty: 2, - question: - "Soit $A = \\begin{pmatrix} 1 & 2 \\\\ 3 & 4 
\\end{pmatrix}$ et $B = \\begin{pmatrix} 5 & 6 \\\\ 7 & 8 \\end{pmatrix}$. Quel est l'element $(1,1)$ de $AB$ ?", - expected: 19, - explanation: "$(AB)_{11} = 1 \\times 5 + 2 \\times 7 = 5 + 14 = 19$.", - }, - { - id: "linalg-004", - topic: "linear_algebra", - type: "qcm", - difficulty: 2, - question: - "Les vecteurs $(1, 0, 1)$ et $(0, 1, 1)$ et $(1, 1, 0)$ sont-ils libres dans $\\mathbb{R}^3$ ?", - choices: [ - "Oui, ils forment une base", - "Non, ils sont lies", - "On ne peut pas determiner", - "Oui mais ne forment pas une base", - ], - correct_index: 0, - explanation: - "Le determinant de la matrice formee par ces vecteurs vaut $-2 \\neq 0$, donc ils sont libres et forment une base de $\\mathbb{R}^3$.", - }, - { - id: "linalg-005", - topic: "linear_algebra", - type: "numeric", - difficulty: 2, - question: - "Le rang de $A = \\begin{pmatrix} 1 & 2 & 3 \\\\ 2 & 4 & 6 \\\\ 0 & 1 & 1 \\end{pmatrix}$ est :", - expected: 2, - explanation: - "La 2e ligne = 2 fois la 1ere. Apres elimination, il reste 2 pivots, donc rang = 2.", - }, - { - id: "linalg-006", - topic: "linear_algebra", - type: "qcm", - difficulty: 3, - question: - "La trace de $A = \\begin{pmatrix} 3 & 1 \\\\ 0 & 5 \\end{pmatrix}$ est egale a :", - choices: ["$8$", "$15$", "$3$", "$5$"], - correct_index: 0, - explanation: "La trace est la somme des elements diagonaux : $3 + 5 = 8$.", - }, - { - id: "linalg-007", - topic: "linear_algebra", - type: "numeric", - difficulty: 3, - question: - "Quelle est la plus grande valeur propre de $A = \\begin{pmatrix} 4 & 1 \\\\ 2 & 3 \\end{pmatrix}$ ?", - expected: 5, - explanation: - "$\\det(A - \\lambda I) = (4-\\lambda)(3-\\lambda) - 2 = \\lambda^2 - 7\\lambda + 10 = (\\lambda-5)(\\lambda-2)$. 
Valeurs propres : 5 et 2.", - }, - { - id: "linalg-008", - topic: "linear_algebra", - type: "qcm", - difficulty: 2, - question: - "Le theoreme du rang dit que pour $f: E \\to F$ lineaire, $\\dim(E) = $ :", - choices: [ - "$\\dim(\\ker f) + \\dim(\\text{Im} f)$", - "$\\dim(\\ker f) \\times \\dim(\\text{Im} f)$", - "$\\dim(\\ker f) - \\dim(\\text{Im} f)$", - "$\\dim(F)$", - ], - correct_index: 0, - explanation: - "Theoreme du rang : $\\dim(E) = \\dim(\\ker f) + \\dim(\\text{Im} f)$.", - }, - { - id: "linalg-009", - topic: "linear_algebra", - type: "numeric", - difficulty: 2, - question: "Calculez la norme du vecteur $(3, 4)$ dans $\\mathbb{R}^2$.", - expected: 5, - explanation: "$\\|v\\| = \\sqrt{3^2 + 4^2} = \\sqrt{25} = 5$.", - }, - { - id: "linalg-010", - topic: "linear_algebra", - type: "numeric", - difficulty: 3, - question: - "Calculez le produit scalaire de $u = (1, 2, 3)$ et $v = (4, -5, 6)$.", - expected: 12, - explanation: "$u \\cdot v = 4 + (-10) + 18 = 12$.", - }, - { - id: "linalg-011", - topic: "linear_algebra", - type: "qcm", - difficulty: 3, - question: "Une matrice symetrique reelle est toujours :", - choices: [ - "Diagonalisable en base orthonormee", - "Inversible", - "Definie positive", - "Triangulaire", - ], - correct_index: 0, - explanation: - "Theoreme spectral : toute matrice symetrique reelle est diagonalisable dans une base orthonormee.", - }, - { - id: "linalg-012", - topic: "linear_algebra", - type: "numeric", - difficulty: 2, - question: - "Soit $A$ une matrice $3 \\times 3$ avec $\\det(A) = 4$. Que vaut $\\det(2A)$ ?", - expected: 32, - explanation: - "$\\det(cA) = c^n \\det(A)$ pour une matrice $n \\times n$. 
$\\det(2A) = 2^3 \\times 4 = 32$.", - }, - { - id: "linalg-013", - topic: "linear_algebra", - type: "expression", - difficulty: 2, - question: - "Le polynome caracteristique de $A = \\begin{pmatrix} 2 & 0 \\\\ 0 & 3 \\end{pmatrix}$ est :", - expected: "(\\lambda-2)(\\lambda-3)", - explanation: - "$\\det(A - \\lambda I) = (2-\\lambda)(3-\\lambda) = (\\lambda-2)(\\lambda-3)$.", - }, - { - id: "linalg-014", - topic: "linear_algebra", - type: "expression", - difficulty: 3, - question: - "La pseudo-inverse (Moore-Penrose) de $A$ est donnee par la formule des moindres carres : $A^+ = $", - expected: "(A^TA)^{-1}A^T", - explanation: - "Pour $A$ de rang plein en colonnes : $A^+ = (A^T A)^{-1} A^T$.", - }, - { - id: "linalg-015", - topic: "linear_algebra", - type: "qcm", - difficulty: 3, - question: - "La SVD de $A$ est $A = U\\Sigma V^T$. Les colonnes de $U$ sont :", - choices: [ - "Les vecteurs propres de $AA^T$", - "Les vecteurs propres de $A^TA$", - "Les vecteurs propres de $A$", - "Les valeurs singulieres de $A$", - ], - correct_index: 0, - explanation: - "Les colonnes de $U$ sont les vecteurs propres de $AA^T$, et celles de $V$ sont les vecteurs propres de $A^TA$.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Analyse (Prepa ML) */ -/* ------------------------------------------------------------------ */ - -const ANALYSIS_EXERCISES: MathExercise[] = [ - { - id: "anal-001", - topic: "analysis", - type: "qcm", - difficulty: 2, - question: "La serie $\\sum \\frac{1}{n^2}$ est :", - choices: ["Convergente", "Divergente", "Alternee", "Indeterminee"], - correct_index: 0, - explanation: - "C'est une serie de Riemann avec $\\alpha = 2 > 1$, donc convergente.", - }, - { - id: "anal-002", - topic: "analysis", - type: "qcm", - difficulty: 2, - question: "La serie harmonique $\\sum \\frac{1}{n}$ est :", - choices: ["Divergente", "Convergente", "Alternee convergente", "Bornee"], - correct_index: 0, - explanation: "Serie de Riemann avec 
$\\alpha = 1$, donc divergente.", - }, - { - id: "anal-003", - topic: "analysis", - type: "expression", - difficulty: 2, - question: - "Calculez le gradient de $f(x, y) = x^2 y + \\sin(xy)$. La composante $\\frac{\\partial f}{\\partial x}$ est :", - expected: "2xy+y\\cos(xy)", - explanation: "$\\frac{\\partial f}{\\partial x} = 2xy + y\\cos(xy)$.", - }, - { - id: "anal-004", - topic: "analysis", - type: "numeric", - difficulty: 2, - question: "Le rayon de convergence de $\\sum \\frac{x^n}{n!}$ est :", - expected: 999999, - tolerance: 100000, - explanation: - "Par le critere de d'Alembert : $R = \\lim \\frac{a_n}{a_{n+1}} = \\lim (n+1) = +\\infty$. C'est le developpement de $e^x$.", - }, - { - id: "anal-005", - topic: "analysis", - type: "expression", - difficulty: 2, - question: - "Le developpement de Taylor de $e^x$ a l'ordre 3 autour de 0 est :", - expected: "1+x+\\frac{x^2}{2}+\\frac{x^3}{6}", - explanation: "$e^x = 1 + x + \\frac{x^2}{2!} + \\frac{x^3}{3!} + ...$", - }, - { - id: "anal-006", - topic: "analysis", - type: "qcm", - difficulty: 3, - question: - "Une fonction $f: \\mathbb{R}^n \\to \\mathbb{R}$ est convexe si et seulement si :", - choices: [ - "Sa matrice hessienne est semi-definie positive partout", - "Son gradient est nul en un point", - "Elle est continue et derivable", - "Elle admet un maximum global", - ], - correct_index: 0, - explanation: - "Critere du second ordre pour la convexite : $H_f \\succeq 0$ partout.", - }, - { - id: "anal-007", - topic: "analysis", - type: "numeric", - difficulty: 2, - question: "Calculez $\\int_0^1 \\int_0^1 xy \\, dx \\, dy$.", - expected: 0.25, - explanation: - "$\\int_0^1 \\int_0^1 xy \\, dx \\, dy = \\int_0^1 y [\\frac{x^2}{2}]_0^1 dy = \\int_0^1 \\frac{y}{2} dy = \\frac{1}{4}$.", - }, - { - id: "anal-008", - topic: "analysis", - type: "numeric", - difficulty: 3, - question: - "L'integrale de Gauss : $\\int_{-\\infty}^{+\\infty} e^{-x^2} dx = \\sqrt{\\pi}$. 
Arrondissez au centieme.", - expected: 1.77, - tolerance: 0.01, - explanation: "$\\sqrt{\\pi} \\approx 1.7725$.", - }, - { - id: "anal-009", - topic: "analysis", - type: "qcm", - difficulty: 2, - question: "Le gradient indique :", - choices: [ - "La direction de plus forte croissance de $f$", - "La direction de plus forte decroissance", - "La direction tangente a une courbe de niveau", - "Le minimum de $f$", - ], - correct_index: 0, - explanation: - "Le gradient pointe dans la direction de plus forte croissance.", - }, - { - id: "anal-010", - topic: "analysis", - type: "expression", - difficulty: 3, - question: - "Calculez le gradient de la MSE loss $L(w) = \\frac{1}{n}||Xw - y||^2$. $\\nabla_w L = $", - expected: "\\frac{2}{n}X^T(Xw-y)", - explanation: - "$\\nabla_w L = \\frac{2}{n}X^T(Xw - y)$. C'est la base du gradient descent pour la regression lineaire.", - }, - { - id: "anal-011", - topic: "analysis", - type: "qcm", - difficulty: 3, - question: - "Pour une fonction $L$-smooth convexe, le gradient descent converge a un taux de :", - choices: ["$O(1/k)$", "$O(1/k^2)$", "$O(e^{-k})$", "$O(1/\\sqrt{k})$"], - correct_index: 0, - explanation: - "Pour une fonction convexe $L$-smooth, GD converge en $O(1/k)$. Avec forte convexite : $O(e^{-k})$.", - }, - { - id: "anal-012", - topic: "analysis", - type: "numeric", - difficulty: 2, - question: - "Le learning rate optimal pour GD sur une fonction $L$-smooth est $\\eta = 1/L$. Si $L = 4$, $\\eta = $ ?", - expected: 0.25, - explanation: "$\\eta = 1/L = 1/4 = 0.25$.", - }, - { - id: "anal-013", - topic: "analysis", - type: "expression", - difficulty: 2, - question: - "La jacobienne de $f(x,y) = (x^2 + y, xy)$ a la forme $J = \\begin{pmatrix} a & b \\\\ c & d \\end{pmatrix}$. 
Que vaut $a$ ?", - expected: "2x", - explanation: "$J_{11} = \\frac{\\partial f_1}{\\partial x} = 2x$.", - }, - { - id: "anal-014", - topic: "analysis", - type: "numeric", - difficulty: 2, - question: - "Determinez si $\\sum_{n=1}^{\\infty} \\frac{1}{n^3}$ converge. Si oui, encadrez la somme entre 1 et 2.", - expected: 1.2, - tolerance: 0.3, - explanation: - "$\\sum 1/n^3$ converge (Riemann $\\alpha=3>1$). Sa valeur exacte est $\\zeta(3) \\approx 1.202$.", - }, - { - id: "anal-015", - topic: "analysis", - type: "qcm", - difficulty: 2, - question: - "La condition de Lagrange pour $\\min f(x)$ sous contrainte $g(x) = 0$ est :", - choices: [ - "$\\nabla f = \\lambda \\nabla g$", - "$\\nabla f = 0$", - "$\\nabla g = 0$", - "$f(x) = g(x)$", - ], - correct_index: 0, - explanation: - "Au point optimal : $\\nabla f = \\lambda \\nabla g$ (le gradient de $f$ est proportionnel a celui de $g$).", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Probabilites & Stats (Prepa ML) */ -/* ------------------------------------------------------------------ */ - -const PROBABILITY_EXERCISES: MathExercise[] = [ - { - id: "prob-001", - topic: "probability", - type: "numeric", - difficulty: 2, - question: - "Un test medical a une sensibilite de 99% et une specificite de 95%. La maladie touche 1% de la population. Quelle est $P(\\text{malade}|\\text{test}+)$ ? 
(arrondi au centieme)", - expected: 0.17, - tolerance: 0.01, - explanation: - "Bayes : $P(M|+) = \\frac{0.99 \\times 0.01}{0.99 \\times 0.01 + 0.05 \\times 0.99} = \\frac{0.0099}{0.0099 + 0.0495} \\approx 0.167$.", - }, - { - id: "prob-002", - topic: "probability", - type: "qcm", - difficulty: 2, - question: "L'esperance d'une loi de Poisson de parametre $\\lambda$ est :", - choices: [ - "$\\lambda$", - "$\\lambda^2$", - "$1/\\lambda$", - "$\\sqrt{\\lambda}$", - ], - correct_index: 0, - explanation: - "Pour $X \\sim \\text{Poisson}(\\lambda)$ : $E[X] = \\lambda$ et $\\text{Var}(X) = \\lambda$.", - }, - { - id: "prob-003", - topic: "probability", - type: "numeric", - difficulty: 2, - question: - "$X \\sim \\mathcal{N}(10, 4)$ (moyenne 10, variance 4). Calculez $E[X^2]$. (Rappel : $E[X^2] = \\text{Var}(X) + (E[X])^2$)", - expected: 104, - explanation: "$E[X^2] = \\text{Var}(X) + \\mu^2 = 4 + 100 = 104$.", - }, - { - id: "prob-004", - topic: "probability", - type: "qcm", - difficulty: 2, - question: - "Le theoreme central limite dit que la moyenne empirique $\\bar{X}_n$ :", - choices: [ - "Converge en loi vers $\\mathcal{N}(\\mu, \\sigma^2/n)$", - "Converge vers $\\mu$ presque surement", - "A toujours une distribution normale", - "Converge vers 0", - ], - correct_index: 0, - explanation: - "TCL : $\\bar{X}_n \\xrightarrow{\\mathcal{L}} \\mathcal{N}(\\mu, \\sigma^2/n)$.", - }, - { - id: "prob-005", - topic: "probability", - type: "numeric", - difficulty: 2, - question: - "Combien y a-t-il de facons de choisir 3 elements parmi 10 ? ($C_{10}^3$)", - expected: 120, - explanation: - "$C_{10}^3 = \\frac{10!}{3! 
\\cdot 7!} = \\frac{10 \\times 9 \\times 8}{6} = 120$.", - }, - { - id: "prob-006", - topic: "probability", - type: "expression", - difficulty: 3, - question: - "L'estimateur du maximum de vraisemblance de $\\mu$ pour $X_1,...,X_n \\sim \\mathcal{N}(\\mu, \\sigma^2)$ est :", - expected: "\\frac{1}{n}\\sum_{i=1}^{n}X_i", - explanation: - "Le MLE de $\\mu$ pour une loi normale est la moyenne empirique $\\hat{\\mu} = \\bar{X} = \\frac{1}{n}\\sum X_i$.", - }, - { - id: "prob-007", - topic: "probability", - type: "qcm", - difficulty: 2, - question: "La variance d'une loi $\\text{Bernoulli}(p)$ est :", - choices: ["$p(1-p)$", "$p$", "$p^2$", "$1-p$"], - correct_index: 0, - explanation: - "$\\text{Var}(X) = p(1-p)$ pour $X \\sim \\text{Bernoulli}(p)$.", - }, - { - id: "prob-008", - topic: "probability", - type: "numeric", - difficulty: 2, - question: - "Si $X$ et $Y$ sont independants avec $E[X] = 3$ et $E[Y] = 4$, que vaut $E[XY]$ ?", - expected: 12, - explanation: - "Par independance : $E[XY] = E[X] \\cdot E[Y] = 3 \\times 4 = 12$.", - }, - { - id: "prob-009", - topic: "probability", - type: "qcm", - difficulty: 3, - question: "La divergence KL $D_{KL}(P||Q)$ est :", - choices: [ - "Toujours $\\geq 0$ et nulle ssi $P = Q$", - "Symetrique en $P$ et $Q$", - "Une distance au sens metrique", - "Toujours $\\leq 1$", - ], - correct_index: 0, - explanation: - "La divergence KL est non-negative (inegalite de Gibbs) et $D_{KL}(P||Q) = 0 \\Leftrightarrow P = Q$. Elle n'est pas symetrique.", - }, - { - id: "prob-010", - topic: "probability", - type: "numeric", - difficulty: 2, - question: - "Un intervalle de confiance a 95% pour $\\mu$ (grand echantillon) est $\\bar{X} \\pm z_{0.025} \\cdot \\frac{\\sigma}{\\sqrt{n}}$. Si $\\bar{X} = 50$, $\\sigma = 10$, $n = 100$, quelle est la borne superieure ?", - expected: 51.96, - tolerance: 0.01, - explanation: - "$z_{0.025} = 1.96$. IC = $50 \\pm 1.96 \\times \\frac{10}{10} = 50 \\pm 1.96$. 
Borne sup = 51.96.", - }, - { - id: "prob-011", - topic: "probability", - type: "qcm", - difficulty: 2, - question: "La loi des grands nombres affirme que :", - choices: [ - "$\\bar{X}_n \\to \\mu$ en probabilite quand $n \\to \\infty$", - "$\\bar{X}_n$ est toujours egal a $\\mu$", - "La variance de $\\bar{X}_n$ augmente avec $n$", - "$P(X > \\mu)$ converge vers 1", - ], - correct_index: 0, - explanation: "LGN faible : $\\bar{X}_n \\xrightarrow{P} \\mu$.", - }, - { - id: "prob-012", - topic: "probability", - type: "numeric", - difficulty: 2, - question: - "L'entropie de Shannon d'une piece equilibree ($P(H) = P(T) = 0.5$) en bits est :", - expected: 1, - explanation: - "$H = -0.5\\log_2(0.5) - 0.5\\log_2(0.5) = -(-0.5) - (-0.5) = 1$ bit.", - }, - { - id: "prob-013", - topic: "probability", - type: "numeric", - difficulty: 3, - question: - "L'esperance d'une loi exponentielle de parametre $\\lambda = 2$ est :", - expected: 0.5, - explanation: "$E[X] = 1/\\lambda = 1/2 = 0.5$.", - }, - { - id: "prob-014", - topic: "probability", - type: "qcm", - difficulty: 3, - question: "La formule de Bayes est :", - choices: [ - "$P(A|B) = \\frac{P(B|A)P(A)}{P(B)}$", - "$P(A|B) = P(A)P(B)$", - "$P(A|B) = \\frac{P(A)}{P(B)}$", - "$P(A|B) = P(A) + P(B)$", - ], - correct_index: 0, - explanation: - "Formule de Bayes : $P(A|B) = \\frac{P(B|A) \\cdot P(A)}{P(B)}$.", - }, - { - id: "prob-015", - topic: "probability", - type: "numeric", - difficulty: 2, - question: "Combien d'arrangements de 3 elements parmi 5 ? 
($A_5^3$)", - expected: 60, - explanation: "$A_5^3 = \\frac{5!}{2!} = 5 \\times 4 \\times 3 = 60$.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Maths Appliquees ML (Prepa ML) */ -/* ------------------------------------------------------------------ */ - -const APPLIED_ML_EXERCISES: MathExercise[] = [ - { - id: "aml-001", - topic: "applied_ml", - type: "qcm", - difficulty: 2, - question: "La cross-entropy loss pour la classification binaire est :", - choices: [ - "$-[y\\log(p) + (1-y)\\log(1-p)]$", - "$(y - p)^2$", - "$|y - p|$", - "$y \\cdot p$", - ], - correct_index: 0, - explanation: - "Binary cross-entropy : $\\mathcal{L} = -[y\\log(p) + (1-y)\\log(1-p)]$.", - }, - { - id: "aml-002", - topic: "applied_ml", - type: "expression", - difficulty: 3, - question: "La mise a jour du gradient descent est : $w_{t+1} = $", - expected: "w_t-\\eta\\nabla L(w_t)", - explanation: - "$w_{t+1} = w_t - \\eta \\nabla L(w_t)$ ou $\\eta$ est le learning rate.", - }, - { - id: "aml-003", - topic: "applied_ml", - type: "qcm", - difficulty: 2, - question: "La regularisation Ridge (L2) ajoute au loss un terme :", - choices: [ - "$\\lambda ||w||_2^2$", - "$\\lambda ||w||_1$", - "$\\lambda ||w||_\\infty$", - "$\\lambda \\log(||w||)$", - ], - correct_index: 0, - explanation: - "Ridge = L2 penalty : $\\mathcal{L}_{reg} = \\mathcal{L} + \\lambda ||w||_2^2$.", - }, - { - id: "aml-004", - topic: "applied_ml", - type: "expression", - difficulty: 3, - question: "La solution fermee de la regression Ridge est $\\hat{w} = $", - expected: "(X^TX+\\lambda I)^{-1}X^Ty", - explanation: - "$\\hat{w} = (X^TX + \\lambda I)^{-1}X^Ty$. 
La regularisation rend la matrice toujours inversible.", - }, - { - id: "aml-005", - topic: "applied_ml", - type: "qcm", - difficulty: 2, - question: "La fonction softmax transforme un vecteur $z$ en :", - choices: [ - "Un vecteur de probabilites qui somment a 1", - "Un vecteur de valeurs entre -1 et 1", - "Un vecteur normalise de norme 1", - "Un vecteur binaire", - ], - correct_index: 0, - explanation: - "$\\text{softmax}(z_i) = \\frac{e^{z_i}}{\\sum_j e^{z_j}}$. Les sorties sont positives et somment a 1.", - }, - { - id: "aml-006", - topic: "applied_ml", - type: "numeric", - difficulty: 2, - question: "La fonction sigmoid $\\sigma(0)$ vaut :", - expected: 0.5, - explanation: - "$\\sigma(x) = \\frac{1}{1+e^{-x}}$. $\\sigma(0) = \\frac{1}{1+1} = 0.5$.", - }, - { - id: "aml-007", - topic: "applied_ml", - type: "qcm", - difficulty: 3, - question: "Adam combine :", - choices: [ - "Momentum et RMSprop", - "SGD et L2 regularization", - "Gradient descent et Newton", - "Dropout et batch norm", - ], - correct_index: 0, - explanation: - "Adam = Adaptive Moment Estimation, combinant les moyennes mobiles du gradient (momentum) et du gradient carre (RMSprop).", - }, - { - id: "aml-008", - topic: "applied_ml", - type: "expression", - difficulty: 2, - question: "La derivee de la sigmoid $\\sigma(x)$ est $\\sigma'(x) = $", - expected: "\\sigma(x)(1-\\sigma(x))", - explanation: - "$\\sigma'(x) = \\sigma(x)(1 - \\sigma(x))$. 
Maximum a $x = 0$ : $\\sigma'(0) = 0.25$.", - }, - { - id: "aml-009", - topic: "applied_ml", - type: "numeric", - difficulty: 2, - question: - "Si la MSE loss est $L = \\frac{1}{2}(y - wx)^2$ avec $y=3, x=2, w=1$, que vaut $\\frac{\\partial L}{\\partial w}$ ?", - expected: -2, - explanation: - "$\\frac{\\partial L}{\\partial w} = -(y - wx)x = -(3 - 2) \\times 2 = -2$.", - }, - { - id: "aml-010", - topic: "applied_ml", - type: "qcm", - difficulty: 2, - question: "La backpropagation utilise :", - choices: [ - "La chain rule pour calculer les gradients couche par couche", - "La descente de gradient stochastique uniquement", - "Des approximations numeriques du gradient", - "La methode de Newton", - ], - correct_index: 0, - explanation: - "Backprop = application systematique de la chain rule pour propager les gradients de la loss vers les parametres.", - }, - { - id: "aml-011", - topic: "applied_ml", - type: "numeric", - difficulty: 2, - question: "ReLU($-3$) = ?", - expected: 0, - explanation: "$\\text{ReLU}(x) = \\max(0, x)$. $\\text{ReLU}(-3) = 0$.", - }, - { - id: "aml-012", - topic: "applied_ml", - type: "qcm", - difficulty: 3, - question: - "Le biais-variance tradeoff dit que l'erreur de generalisation = :", - choices: [ - "$\\text{Biais}^2 + \\text{Variance} + \\text{Bruit irreductible}$", - "$\\text{Biais} + \\text{Variance}$", - "$\\text{Biais} \\times \\text{Variance}$", - "$\\frac{\\text{Biais}}{\\text{Variance}}$", - ], - correct_index: 0, - explanation: - "Decomposition biais-variance : $\\text{Erreur} = \\text{Biais}^2 + \\text{Variance} + \\sigma^2_{\\text{bruit}}$.", - }, - { - id: "aml-013", - topic: "applied_ml", - type: "numeric", - difficulty: 2, - question: - "Batch Normalization normalise les activations. 
Si $\\mu = 5$ et $\\sigma = 2$, $\\text{BN}(9) = $", - expected: 2, - explanation: - "$\\text{BN}(x) = \\frac{x - \\mu}{\\sigma} = \\frac{9 - 5}{2} = 2$ (avant affine transform).", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Exercise bank lookup */ -/* ------------------------------------------------------------------ */ - -const ALL_EXERCISES: Record = { - diagnostic: DIAGNOSTIC_EXERCISES, - college: COLLEGE_EXERCISES, - lycee: LYCEE_EXERCISES, - terminale: TERMINALE_EXERCISES, - linear_algebra: LINEAR_ALGEBRA_EXERCISES, - analysis: ANALYSIS_EXERCISES, - probability: PROBABILITY_EXERCISES, - applied_ml: APPLIED_ML_EXERCISES, -}; - -export function get_exercises_for_topic(topic: TopicId): MathExercise[] { - return ALL_EXERCISES[topic] ?? []; -} - -export function get_session_exercises( - topic: TopicId, - count = 10, -): MathExercise[] { - const all = get_exercises_for_topic(topic); - // Shuffle and take `count` - const shuffled = [...all].sort(() => Math.random() - 0.5); - return shuffled.slice(0, Math.min(count, shuffled.length)); -} diff --git a/frontend/src/components/math-exercise/exercise-types.ts b/frontend/src/components/math-exercise/exercise-types.ts deleted file mode 100644 index 3828775..0000000 --- a/frontend/src/components/math-exercise/exercise-types.ts +++ /dev/null @@ -1,108 +0,0 @@ -/* ------------------------------------------------------------------ */ -/* Math Exercise types */ -/* ------------------------------------------------------------------ */ - -export type ExerciseType = "qcm" | "numeric" | "expression"; -export type Difficulty = 1 | 2 | 3; - -export type TopicId = - | "diagnostic" - | "college" - | "lycee" - | "terminale" - | "linear_algebra" - | "analysis" - | "probability" - | "applied_ml"; - -export type TrackId = "zero_to_one" | "prepa_ml"; - -/* ------------------------------------------------------------------ */ -/* Exercise definitions */ -/* 
------------------------------------------------------------------ */ - -interface BaseExercise { - id: string; - topic: TopicId; - question: string; // may contain LaTeX wrapped in $..$ or $$..$$ - explanation: string; - difficulty: Difficulty; -} - -export interface QcmExercise extends BaseExercise { - type: "qcm"; - choices: string[]; // 4 choices, may contain LaTeX - correct_index: number; // 0-based -} - -export interface NumericExercise extends BaseExercise { - type: "numeric"; - expected: number; - tolerance?: number; // default 0.001 -} - -export interface ExpressionExercise extends BaseExercise { - type: "expression"; - expected: string; // canonical LaTeX expression -} - -export type MathExercise = QcmExercise | NumericExercise | ExpressionExercise; - -/* ------------------------------------------------------------------ */ -/* Results & progress */ -/* ------------------------------------------------------------------ */ - -export interface ExerciseResult { - exercise_id: string; - correct: boolean; - time_ms: number; -} - -export interface SessionResult { - topic: TopicId; - total: number; - correct: number; - accuracy_pct: number; - duration_seconds: number; - date: string; // ISO date - results: ExerciseResult[]; -} - -export interface TopicProgress { - total_attempted: number; - total_correct: number; - streak: number; - best_streak: number; - last_session_date: string | null; - mastery_level: "not_started" | "learning" | "practicing" | "mastered"; - history: { date: string; score: number; count: number }[]; -} - -export interface MathProgressStore { - [topic_id: string]: TopicProgress; -} - -/* ------------------------------------------------------------------ */ -/* Topic metadata for PracticeTab */ -/* ------------------------------------------------------------------ */ - -export interface TopicMeta { - id: TopicId; - label: string; - track: TrackId; -} - -export const TOPIC_METAS: TopicMeta[] = [ - { - id: "diagnostic", - label: "Diagnostic & 
Arithmetique", - track: "zero_to_one", - }, - { id: "college", label: "College (6e-3e)", track: "zero_to_one" }, - { id: "lycee", label: "Seconde-Premiere", track: "zero_to_one" }, - { id: "terminale", label: "Terminale S", track: "zero_to_one" }, - { id: "linear_algebra", label: "Algebre Lineaire", track: "prepa_ml" }, - { id: "analysis", label: "Analyse", track: "prepa_ml" }, - { id: "probability", label: "Probabilites & Stats", track: "prepa_ml" }, - { id: "applied_ml", label: "Maths Appliquees ML", track: "prepa_ml" }, -]; diff --git a/frontend/src/hooks/useMathProgress.ts b/frontend/src/hooks/useMathProgress.ts deleted file mode 100644 index 092a7b7..0000000 --- a/frontend/src/hooks/useMathProgress.ts +++ /dev/null @@ -1,133 +0,0 @@ -import { useCallback } from "react"; - -import { useLocalStorage } from "./useLocalStorage"; -import type { - MathProgressStore, - TopicProgress, - SessionResult, - TopicId, -} from "../components/math-exercise/exercise-types"; - -/* ------------------------------------------------------------------ */ -/* Defaults */ -/* ------------------------------------------------------------------ */ - -const EMPTY_PROGRESS: TopicProgress = { - total_attempted: 0, - total_correct: 0, - streak: 0, - best_streak: 0, - last_session_date: null, - mastery_level: "not_started", - history: [], -}; - -const MAX_HISTORY = 20; - -function compute_mastery( - total_attempted: number, - accuracy: number, -): TopicProgress["mastery_level"] { - if (total_attempted === 0) return "not_started"; - if (total_attempted < 10) return "learning"; - if (accuracy >= 85) return "mastered"; - return "practicing"; -} - -/* ------------------------------------------------------------------ */ -/* Hook */ -/* ------------------------------------------------------------------ */ - -export function useMathProgress() { - const [store, set_store] = useLocalStorage( - "math-progress", - {}, - ); - - const get_topic_progress = useCallback( - (topic: TopicId): TopicProgress => 
{ - return store[topic] ?? EMPTY_PROGRESS; - }, - [store], - ); - - const record_session = useCallback( - (result: SessionResult) => { - set_store((prev) => { - const existing = prev[result.topic] ?? { ...EMPTY_PROGRESS }; - - const new_total_attempted = existing.total_attempted + result.total; - const new_total_correct = existing.total_correct + result.correct; - - // Update streak: consecutive correct in this session - const session_all_correct = result.correct === result.total; - const new_streak = session_all_correct ? existing.streak + 1 : 0; - const new_best_streak = Math.max(existing.best_streak, new_streak); - - const overall_accuracy = - new_total_attempted > 0 - ? Math.round((new_total_correct / new_total_attempted) * 100) - : 0; - - const history_entry = { - date: result.date, - score: result.accuracy_pct, - count: result.total, - }; - - const new_history = [history_entry, ...existing.history].slice( - 0, - MAX_HISTORY, - ); - - return { - ...prev, - [result.topic]: { - total_attempted: new_total_attempted, - total_correct: new_total_correct, - streak: new_streak, - best_streak: new_best_streak, - last_session_date: result.date, - mastery_level: compute_mastery( - new_total_attempted, - overall_accuracy, - ), - history: new_history, - }, - }; - }); - }, - [set_store], - ); - - const get_overall_stats = useCallback(() => { - let total_attempted = 0; - let total_correct = 0; - let topics_started = 0; - let topics_mastered = 0; - - for (const progress of Object.values(store)) { - total_attempted += progress.total_attempted; - total_correct += progress.total_correct; - if (progress.mastery_level !== "not_started") topics_started++; - if (progress.mastery_level === "mastered") topics_mastered++; - } - - return { - total_attempted, - total_correct, - accuracy_pct: - total_attempted > 0 - ? 
Math.round((total_correct / total_attempted) * 100) - : 0, - topics_started, - topics_mastered, - }; - }, [store]); - - return { - get_topic_progress, - record_session, - get_overall_stats, - }; -} diff --git a/frontend/src/lib/workflowInventory.ts b/frontend/src/lib/workflowInventory.ts index 560223e..906375c 100644 --- a/frontend/src/lib/workflowInventory.ts +++ b/frontend/src/lib/workflowInventory.ts @@ -9,7 +9,6 @@ export type WorkflowId = | "linkedin_events" | "dev_reference" | "interview_prep" - | "math_refresh" | "chinese" | "culture_generale" | "applied_systems" @@ -158,18 +157,6 @@ export const WORKFLOW_INVENTORY: WorkflowInventoryItem[] = [ "Distributed Systems", ], }, - { - id: "math_refresh", - title: "Maths", - description: - "Two study tracks: Zero-to-One (collège through terminale) and Prépa ML (linear algebra through applied ML). Exercises and evaluations included.", - capabilities: [ - "Zero-to-One track (collège → terminale)", - "Prépa ML track (linear algebra → applied ML)", - "Guided exercises", - "Evaluation tests", - ], - }, { id: "chinese", title: "Chinese", @@ -257,10 +244,7 @@ export const WORKFLOW_INVENTORY: WorkflowInventoryItem[] = [ title: "Innervisions", description: "Your Innervisions release identity — Sonic DNA for musical direction and a reference track library for taste calibration.", - capabilities: [ - "Sonic DNA", - "Reference track library", - ], + capabilities: ["Sonic DNA", "Reference track library"], }, { id: "shopify_module48", diff --git a/frontend/src/router.tsx b/frontend/src/router.tsx index fc35e35..fc5ad55 100644 --- a/frontend/src/router.tsx +++ b/frontend/src/router.tsx @@ -57,7 +57,6 @@ import { AIEngineeringRoute } from "./routes/ai-engineering"; import { FrontendEngRoute } from "./routes/frontend-eng"; import { GpuForAIRoute } from "./routes/gpu-for-ai"; import { BioAugmentationRoute } from "./routes/bio-augmentation"; -import { MathRefreshRoute } from "./routes/math-refresh"; import { CultureGeneraleRoute } 
from "./routes/culture-generale"; import { CognitiveToolkitRoute } from "./routes/cognitive-toolkit"; import { BehavioralDesignRoute } from "./routes/behavioral-design"; @@ -92,8 +91,6 @@ import { HowMonitorWorksRoute } from "./routes/how-monitor-works"; import { HowGpuWorksRoute } from "./routes/how-gpu-works"; import { PricingRoute } from "./routes/pricing"; import { CareerFoundationsRoute } from "./routes/career-foundations"; -import { MathBridgeRoute } from "./routes/math-bridge"; -import { MathLandingRoute } from "./routes/math"; export type CareerTab = "accelerator" | "opportunities"; @@ -276,27 +273,6 @@ export interface HfProjectsSearchParams { tab?: HfProjectsTab; } -export type MathRefreshTrack = "zero_to_one" | "prepa_ml"; -export type MathRefreshTab = - | "methode" - | "diagnostic" - | "college" - | "lycee" - | "terminale" - | "pratique" - | "evaluation" - | "linear_algebra" - | "analysis" - | "probability" - | "applied_ml" - | "geometry_3d" - | "dynamics_physics"; - -export interface MathRefreshSearchParams { - track?: MathRefreshTrack; - tab?: MathRefreshTab; -} - export type CultureGeneraleTrack = | "sciences" | "humanites" @@ -379,17 +355,6 @@ export interface CareerFoundationsSearchParams { tab?: CareerFoundationsTab; } -export type MathBridgeTab = - | "overview" - | "core_numeracy" - | "high_school" - | "pre_university" - | "engineering_prep"; - -export interface MathBridgeSearchParams { - tab?: MathBridgeTab; -} - export type ExecutionPlaybookTab = | "checklists" | "prompt_tactics" @@ -422,7 +387,6 @@ export type ReferenceSection = | "prep" | "dev-ref" | "applied-systems" - | "math-refresh" | "elite-freelance" | "ai-engineering" | "frontend-eng" @@ -1722,7 +1686,6 @@ const VALID_REFERENCE_SECTIONS = new Set([ "prep", "dev-ref", "applied-systems", - "math-refresh", "elite-freelance", "ai-engineering", "domain-ontology", @@ -1906,23 +1869,6 @@ const eliteToolboxRoute = createRoute({ component: EliteToolboxRoute, }); -const VALID_MATH_REFRESH_TRACKS 
= new Set(["zero_to_one", "prepa_ml"]); -const VALID_MATH_REFRESH_TABS = new Set([ - "methode", - "diagnostic", - "college", - "lycee", - "terminale", - "pratique", - "evaluation", - "linear_algebra", - "analysis", - "probability", - "applied_ml", - "geometry_3d", - "dynamics_physics", -]); - const VALID_CULTURE_GENERALE_TRACKS = new Set([ "sciences", "humanites", @@ -1940,89 +1886,6 @@ const VALID_CULTURE_GENERALE_TABS = new Set([ "history", ]); -/* ── Canonical /math hub ──────────────────────────────────────────── */ -const mathRoute = createRoute({ - getParentRoute: () => appRoute, - path: "/math", - errorComponent: ({ error, reset }) => ( - - ), - component: MathLandingRoute, -}); - -const mathRefreshCanonicalRoute = createRoute({ - getParentRoute: () => appRoute, - path: "/math/refresh", - validateSearch: ( - search: Record, - ): MathRefreshSearchParams => { - const result: MathRefreshSearchParams = {}; - if ( - typeof search.track === "string" && - VALID_MATH_REFRESH_TRACKS.has(search.track) - ) { - result.track = search.track as MathRefreshTrack; - } - if ( - typeof search.tab === "string" && - VALID_MATH_REFRESH_TABS.has(search.tab) - ) { - result.tab = search.tab as MathRefreshTab; - } - return result; - }, - errorComponent: ({ error, reset }) => ( - - ), - component: MathRefreshRoute, -}); - -const mathBridgeCanonicalRoute = createRoute({ - getParentRoute: () => appRoute, - path: "/math/bridge", - validateSearch: (search: Record): MathBridgeSearchParams => { - const result: MathBridgeSearchParams = {}; - if ( - typeof search.tab === "string" && - VALID_MATH_BRIDGE_TABS.has(search.tab) - ) { - result.tab = search.tab as MathBridgeTab; - } - return result; - }, - errorComponent: ({ error, reset }) => ( - - ), - component: MathBridgeRoute, -}); - -/* ── Legacy redirects ────────────────────────────────────────────── */ -const mathRefreshRoute = createRoute({ - getParentRoute: () => appRoute, - path: "/math-refresh", - validateSearch: ( - search: Record, - 
): MathRefreshSearchParams => { - const result: MathRefreshSearchParams = {}; - if ( - typeof search.track === "string" && - VALID_MATH_REFRESH_TRACKS.has(search.track) - ) { - result.track = search.track as MathRefreshTrack; - } - if ( - typeof search.tab === "string" && - VALID_MATH_REFRESH_TABS.has(search.tab) - ) { - result.tab = search.tab as MathRefreshTab; - } - return result; - }, - beforeLoad: ({ search }) => { - throw redirect({ to: "/math/refresh", search, replace: true }); - }, -}); - const cultureGeneraleRoute = createRoute({ getParentRoute: () => appRoute, path: "/culture-generale", @@ -2153,32 +2016,6 @@ const VALID_CAREER_FOUNDATIONS_TABS = new Set([ "supply_chain", ]); -const VALID_MATH_BRIDGE_TABS = new Set([ - "overview", - "core_numeracy", - "high_school", - "pre_university", - "engineering_prep", -]); - -const mathBridgeRoute = createRoute({ - getParentRoute: () => appRoute, - path: "/math-bridge", - validateSearch: (search: Record): MathBridgeSearchParams => { - const result: MathBridgeSearchParams = {}; - if ( - typeof search.tab === "string" && - VALID_MATH_BRIDGE_TABS.has(search.tab) - ) { - result.tab = search.tab as MathBridgeTab; - } - return result; - }, - beforeLoad: ({ search }) => { - throw redirect({ to: "/math/bridge", search, replace: true }); - }, -}); - const careerFoundationsRoute = createRoute({ getParentRoute: () => appRoute, path: "/career-foundations", @@ -2458,16 +2295,11 @@ const routeTree = rootRoute.addChildren([ harnessDevToolsRoute, gpuForAIRoute, bioAugmentationRoute, - mathRoute, - mathRefreshCanonicalRoute, - mathBridgeCanonicalRoute, - mathRefreshRoute, cultureGeneraleRoute, cognitiveToolkitRoute, behavioralDesignRoute, eliteFreelanceRoute, module48Route, - mathBridgeRoute, careerFoundationsRoute, toolingRoute, legacyHouseRulesRoute, diff --git a/frontend/src/routes/math-bridge.tsx b/frontend/src/routes/math-bridge.tsx deleted file mode 100644 index a82993d..0000000 --- a/frontend/src/routes/math-bridge.tsx +++ 
/dev/null @@ -1,23 +0,0 @@ -import { lazy, Suspense } from "react"; - -const MathBridgeView = lazy(() => - import("../views/MathBridgeView").then((m) => ({ - default: m.MathBridgeView, - })), -); - -function LoadingSpinner() { - return ( -
-
-
- ); -} - -export function MathBridgeRoute() { - return ( - }> - - - ); -} diff --git a/frontend/src/routes/math-refresh.tsx b/frontend/src/routes/math-refresh.tsx deleted file mode 100644 index a5bd793..0000000 --- a/frontend/src/routes/math-refresh.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { lazy, Suspense } from "react"; - -const MathRefreshView = lazy(() => - import("../views/MathRefreshView").then((m) => ({ - default: m.MathRefreshView, - })), -); - -function LoadingSpinner() { - return ( -
-
-
- ); -} - -export function MathRefreshRoute() { - return ( - }> - - - ); -} diff --git a/frontend/src/routes/math.tsx b/frontend/src/routes/math.tsx deleted file mode 100644 index 34b813a..0000000 --- a/frontend/src/routes/math.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { lazy, Suspense } from "react"; - -const MathLandingView = lazy(() => - import("../views/MathLandingView").then((m) => ({ - default: m.MathLandingView, - })), -); - -function LoadingSpinner() { - return ( -
-
-
- ); -} - -export function MathLandingRoute() { - return ( - }> - - - ); -} diff --git a/frontend/src/views/CareerFoundationsView.tsx b/frontend/src/views/CareerFoundationsView.tsx index 56d8eae..efa981a 100644 --- a/frontend/src/views/CareerFoundationsView.tsx +++ b/frontend/src/views/CareerFoundationsView.tsx @@ -556,7 +556,7 @@ const DOMAIN_TO_FOUNDATION: Record< section: "Missing from your MSc", tab: "foundations", description: - "Probability and inference foundations live in the math section - review discrete math and distributions.", + "Probability and inference foundations - review discrete math and distributions.", }, programming: { section: "Never in your degree", diff --git a/frontend/src/views/CompletionView.tsx b/frontend/src/views/CompletionView.tsx index 7aee739..f915f7c 100644 --- a/frontend/src/views/CompletionView.tsx +++ b/frontend/src/views/CompletionView.tsx @@ -129,18 +129,6 @@ const PILLARS: Pillar[] = [ missing: "Booking/gig CRM, production checklists, plugin inventory", next: "Gig prospection CRM", }, - { - id: 9, - name: "Math & Sciences", - group: "Foundations", - status: "live", - coverage: 60, - surfaces: ["/math-refresh"], - built: - "Two tracks, interactive exercises (QCM, numeric, LaTeX), localStorage progress", - missing: "No API persistence, no mastery badges, no spaced repetition", - next: "API-backed progress", - }, { id: 10, name: "Culture Generale", @@ -331,7 +319,7 @@ const QUEUE: QueueItem[] = [ { rank: 1, feature: "Foundation progress", - pillars: "Math, Culture, Cognitive, Behavioral, Bio", + pillars: "Culture, Cognitive, Behavioral, Bio", now: "50-60% - progress stuck in localStorage, lost across devices", target: "70-75% - progress saved to API, works everywhere", state: "plan_ready", diff --git a/frontend/src/views/ConceptDetailView.tsx b/frontend/src/views/ConceptDetailView.tsx index e6fc8fa..a03f8fc 100644 --- a/frontend/src/views/ConceptDetailView.tsx +++ b/frontend/src/views/ConceptDetailView.tsx @@ -18,7 +18,7 @@ 
import ReactMarkdown from "react-markdown"; import remarkGfm from "remark-gfm"; import { useDocumentTitle } from "../hooks/useDocumentTitle"; import { PremiumPage } from "../components/layout/PremiumShell"; -import { MathText } from "../components/math-exercise/MathRenderer"; +import { MathText } from "../components/MathRenderer"; import { api } from "../lib/api/endpoints"; import { useQuery } from "../lib/query/useQuery"; import { queryKeys } from "../lib/query/keys"; diff --git a/frontend/src/views/MathBridgeLevelContent.tsx b/frontend/src/views/MathBridgeLevelContent.tsx deleted file mode 100644 index bc90e3b..0000000 --- a/frontend/src/views/MathBridgeLevelContent.tsx +++ /dev/null @@ -1,374 +0,0 @@ -import { useState } from "react"; -import { motion } from "motion/react"; -import { - ArrowRight, - BookOpen, - ExternalLink, - Play, - Puzzle, - PenTool, - Lock, - CheckCircle2, - ClipboardCheck, -} from "lucide-react"; - -import { PremiumCard } from "../components/layout/PremiumShell"; -import type { BridgeLevel, BridgeResource } from "./math-bridge-data"; -import { - MVP_SKILLS, - computeStatus, - getSkillProgress, - getLevelReadiness, - type ProgressMap, -} from "./math-bridge-progress"; -import { getCheckForSkill } from "./math-bridge-checks"; -import { MathBridgeMicroCheck } from "./MathBridgeMicroCheck"; -import { LevelReadinessBanner } from "./MathBridgeReadinessBanner"; - -const RESOURCE_ICONS: Record = { - video: Play, - textbook: BookOpen, - interactive: Puzzle, - practice: PenTool, -}; - -const RESOURCE_LABELS: Record = { - video: "Video", - textbook: "Textbook", - interactive: "Interactive", - practice: "Practice", -}; - -const CONFIDENCE_OPTIONS: { value: 1 | 2 | 3; label: string }[] = [ - { value: 1, label: "Not at all" }, - { value: 2, label: "Somewhat" }, - { value: 3, label: "Confident" }, -]; - -interface MathBridgeLevelContentProps { - level: BridgeLevel; - levelTab: string; - color: string; - progress: ProgressMap; - onConfidence: (slug: 
string, confidence: 1 | 2 | 3) => void; - onCheckScore: (slug: string, score: number) => void; -} - -export function MathBridgeLevelContent({ - level, - levelTab, - color, - progress, - onConfidence, - onCheckScore, -}: MathBridgeLevelContentProps) { - const [activeCheck, setActiveCheck] = useState(null); - const readiness = getLevelReadiness(levelTab, progress); - - return ( -
- {/* Readiness banner */} - - - {/* Stats row */} -
- - - {level.topics.length} - {" "} - topics - - · - - - ~{level.totalHours} - {" "} - hours estimated - - · - Level {level.level} -
- - {/* Topic cards grid */} -
- {level.topics.map((topic, i) => { - const isMvp = MVP_SKILLS.has(topic.slug); - const status = isMvp ? computeStatus(topic.slug, progress) : null; - const skillProgress = isMvp - ? getSkillProgress(progress, topic.slug) - : null; - const check = isMvp ? getCheckForSkill(topic.slug) : null; - const isCheckOpen = activeCheck === topic.slug; - - return ( - - - {/* Header */} -
-
- {/* Status icon for MVP skills */} - {status === "solid" && ( - - )} - {status === "locked" && ( - - )} -
-

- {topic.name} -

-

- ~{topic.estimatedHours} hours -

-
-
-
- {status === "solid" && ( - - Solid - - )} - {status === "locked" && ( - - Locked - - )} - {topic.prerequisite && ( - - after {topic.prerequisite} - - )} -
-
- - {/* Why it matters */} -

- {topic.whyItMatters} -

- - {/* Confidence selector (MVP skills, not locked) */} - {isMvp && status !== "locked" && skillProgress && ( -
-

- How confident are you? -

-
- {CONFIDENCE_OPTIONS.map((opt) => { - const isActive = skillProgress.confidence === opt.value; - return ( - - ); - })} -
-
- )} - - {/* Last check score */} - {isMvp && - skillProgress && - skillProgress.lastCheckScore !== null && ( -
- Last score: - = 70 - ? "#4ade80" - : skillProgress.lastCheckScore >= 34 - ? "#ffc47c" - : "#eb5757", - fontWeight: 590, - }} - > - {skillProgress.lastCheckScore}% - - {skillProgress.attempts > 1 && ( - - ({skillProgress.attempts} attempts) - - )} -
- )} - - {/* Take check button (MVP skills, not locked) */} - {isMvp && status !== "locked" && check && !isCheckOpen && ( - - )} - - {/* Micro-check inline */} - {isCheckOpen && check && ( - onCheckScore(topic.slug, score)} - onClose={() => setActiveCheck(null)} - /> - )} - - {/* Key concepts */} - {status !== "locked" && ( -
-

- Key concepts -

-
- {topic.keyConcepts.map((concept) => ( - - {concept} - - ))} -
-
- )} - - {/* Bridges to (Level 3 only) */} - {topic.bridgesTo && - topic.bridgesTo.length > 0 && - status !== "locked" && ( -
-

- Bridges to -

-
- {topic.bridgesTo.map((target) => ( -
- - {target} -
- ))} -
-
- )} - - {/* Resources */} - {topic.resources.length > 0 && status !== "locked" && ( -
-
- {topic.resources.map((resource) => { - const Icon = RESOURCE_ICONS[resource.type]; - return ( - - - {resource.title} - - {RESOURCE_LABELS[resource.type]} - - {resource.free && ( - - Free - - )} - - - ); - })} -
-
- )} -
-
- ); - })} -
-
- ); -} diff --git a/frontend/src/views/MathBridgeMicroCheck.tsx b/frontend/src/views/MathBridgeMicroCheck.tsx deleted file mode 100644 index 70d351d..0000000 --- a/frontend/src/views/MathBridgeMicroCheck.tsx +++ /dev/null @@ -1,193 +0,0 @@ -import { useState } from "react"; -import { motion } from "motion/react"; -import { CheckCircle2, XCircle, RotateCcw } from "lucide-react"; - -import { PremiumCard } from "../components/layout/PremiumShell"; -import type { SkillCheck } from "./math-bridge-checks"; -import { scoreCheck } from "./math-bridge-checks"; - -interface MathBridgeMicroCheckProps { - check: SkillCheck; - color: string; - onComplete: (score: number) => void; - onClose: () => void; -} - -export function MathBridgeMicroCheck({ - check, - color, - onComplete, - onClose, -}: MathBridgeMicroCheckProps) { - const [answers, setAnswers] = useState<(number | null)[]>([null, null, null]); - const [submitted, setSubmitted] = useState(false); - - const allAnswered = answers.every((a) => a !== null); - - function handleSelect(questionIndex: number, choiceIndex: number) { - if (submitted) return; - setAnswers((prev) => { - const next = [...prev]; - next[questionIndex] = choiceIndex; - return next; - }); - } - - function handleSubmit() { - if (!allAnswered) return; - const score = scoreCheck(answers as number[], check); - setSubmitted(true); - onComplete(score); - } - - function handleRetry() { - setAnswers([null, null, null]); - setSubmitted(false); - } - - const score = submitted ? scoreCheck(answers as number[], check) : null; - - return ( - - -
-

- Quick check - 3 questions -

- -
- -
- {check.questions.map((q, qi) => ( -
-

- {qi + 1}. {q.question} -

-
- {q.choices.map((choice, ci) => { - const isSelected = answers[qi] === ci; - const isCorrect = ci === q.correctIndex; - const showResult = submitted; - - let borderColor = "rgba(255,255,255,0.08)"; - let bgColor = "transparent"; - if (showResult && isSelected && isCorrect) { - borderColor = "#4ade80"; - bgColor = "rgba(74,222,128,0.06)"; - } else if (showResult && isSelected && !isCorrect) { - borderColor = "#eb5757"; - bgColor = "rgba(235,87,87,0.06)"; - } else if (showResult && isCorrect) { - borderColor = "rgba(74,222,128,0.3)"; - } else if (isSelected) { - borderColor = color; - bgColor = `${color}08`; - } - - return ( - - ); - })} -
-
- ))} -
- - {/* Actions */} -
- {!submitted && ( - - )} - - {submitted && score !== null && ( - <> -
- = 70 - ? "#4ade80" - : score >= 34 - ? "#ffc47c" - : "#eb5757", - fontWeight: 590, - }} - > - {score}% - - - {score >= 70 - ? "Looking good" - : score >= 34 - ? "Getting there" - : "Keep practicing"} - -
- - - )} -
-
-
- ); -} diff --git a/frontend/src/views/MathBridgeOverview.tsx b/frontend/src/views/MathBridgeOverview.tsx deleted file mode 100644 index fdb9f43..0000000 --- a/frontend/src/views/MathBridgeOverview.tsx +++ /dev/null @@ -1,227 +0,0 @@ -import { motion } from "motion/react"; -import { ArrowRight, ChevronRight } from "lucide-react"; -import { useNavigate } from "@tanstack/react-router"; - -import { PremiumCard } from "../components/layout/PremiumShell"; -import { BRIDGE_LEVELS, TAB_META } from "./math-bridge-data"; -import type { LevelTab } from "./math-bridge-data"; -import { - getLevelReadiness, - isFoundationsReady, - type ProgressMap, -} from "./math-bridge-progress"; -import { FoundationsCta } from "./MathBridgeReadinessBanner"; - -const LEVEL_TABS: LevelTab[] = [ - "core_numeracy", - "high_school", - "pre_university", - "engineering_prep", -]; - -interface MathBridgeOverviewProps { - progress: ProgressMap; -} - -export function MathBridgeOverview({ progress }: MathBridgeOverviewProps) { - const navigate = useNavigate(); - const foundationsReady = isFoundationsReady(progress); - - const totalTopics = LEVEL_TABS.reduce( - (sum, tab) => sum + BRIDGE_LEVELS[tab].topics.length, - 0, - ); - const totalHours = LEVEL_TABS.reduce( - (sum, tab) => sum + BRIDGE_LEVELS[tab].totalHours, - 0, - ); - - return ( -
- {/* Stats */} -
- - 4 levels - - · - - {totalTopics}{" "} - topics - - · - - ~{totalHours}{" "} - hours total - -
- - {/* How it works */} - -

- How this works -

-

- Four levels, each building on the last. Start wherever you feel shaky - - there's no shame in going back to basics. Rate your confidence, take - quick checks, and track your readiness for each level. The goal is to - arrive at the Foundations tab with zero gaps. -

-
- - {/* Level progression */} -
- {LEVEL_TABS.map((tab, i) => { - const level = BRIDGE_LEVELS[tab]; - const meta = TAB_META[tab]; - const readiness = getLevelReadiness(tab, progress); - - return ( - - - - ); - })} -
- - {/* Foundations CTA (replaces static card when ready) */} - {foundationsReady ? ( - - ) : ( - - -
- -
-

- After Level 3, you're ready for Foundations -

-

- The Career Foundations tab picks up where this program ends - - linear algebra for ML, deep learning, systems design, and - career-specific prep. -

-
-
-
-
- )} -
- ); -} diff --git a/frontend/src/views/MathBridgeReadinessBanner.tsx b/frontend/src/views/MathBridgeReadinessBanner.tsx deleted file mode 100644 index fe3bb1b..0000000 --- a/frontend/src/views/MathBridgeReadinessBanner.tsx +++ /dev/null @@ -1,129 +0,0 @@ -import { motion } from "motion/react"; -import { ArrowRight, CheckCircle2, Target } from "lucide-react"; -import { useNavigate } from "@tanstack/react-router"; - -import { PremiumCard } from "../components/layout/PremiumShell"; -import type { LevelReadiness } from "./math-bridge-progress"; - -interface LevelReadinessBannerProps { - readiness: LevelReadiness; - color: string; -} - -export function LevelReadinessBanner({ - readiness, - color, -}: LevelReadinessBannerProps) { - if (readiness.total === 0) return null; - - return ( - - -
- {/* Progress bar */} -
- - {readiness.percentage}% - -
-
-
- - {readiness.solid} of {readiness.total} skills solid - -
- - {/* Weak skills */} - {readiness.weakSkills.length > 0 && ( -
- - - Focus on:{" "} - - {readiness.weakSkills.join(", ")} - - -
- )} - - {/* All solid */} - {readiness.percentage === 100 && ( -
- - All skills solid for this level -
- )} -
- - - ); -} - -interface FoundationsCtaProps { - ready: boolean; -} - -export function FoundationsCta({ ready }: FoundationsCtaProps) { - const navigate = useNavigate(); - - if (!ready) return null; - - return ( - - -
- -
-

- You are ready to enter Career Foundations -

-

- All Level 3 skills are solid. Time to move on to ML foundations, - deep learning, and career-specific prep. -

-
- -
-
-
- ); -} diff --git a/frontend/src/views/MathBridgeView.tsx b/frontend/src/views/MathBridgeView.tsx deleted file mode 100644 index 8b1558b..0000000 --- a/frontend/src/views/MathBridgeView.tsx +++ /dev/null @@ -1,83 +0,0 @@ -import { useState, useCallback } from "react"; -import { useSearch } from "@tanstack/react-router"; - -import { PremiumPage, PremiumHero } from "../components/layout/PremiumShell"; -import { TAB_ORDER, TAB_META, BRIDGE_LEVELS } from "./math-bridge-data"; -import type { MathBridgeTab, LevelTab } from "./math-bridge-data"; -import { MathBridgeOverview } from "./MathBridgeOverview"; -import { MathBridgeLevelContent } from "./MathBridgeLevelContent"; -import { - loadProgress, - saveProgress, - setConfidence, - recordCheckScore, - type ProgressMap, -} from "./math-bridge-progress"; - -const LEVEL_TABS: LevelTab[] = [ - "core_numeracy", - "high_school", - "pre_university", - "engineering_prep", -]; - -function isLevelTab(tab: MathBridgeTab): tab is LevelTab { - return (LEVEL_TABS as string[]).includes(tab); -} - -export function MathBridgeView() { - const search = useSearch({ strict: false }) as { tab?: string }; - const [progress, setProgress] = useState(loadProgress); - - const activeTab: MathBridgeTab = TAB_ORDER.includes( - search.tab as MathBridgeTab, - ) - ? (search.tab as MathBridgeTab) - : "overview"; - - const meta = TAB_META[activeTab]; - - const handleConfidence = useCallback( - (slug: string, confidence: 1 | 2 | 3) => { - setProgress((prev) => { - const next = setConfidence(prev, slug, confidence); - saveProgress(next); - return next; - }); - }, - [], - ); - - const handleCheckScore = useCallback((slug: string, score: number) => { - setProgress((prev) => { - const next = recordCheckScore(prev, slug, score); - saveProgress(next); - return next; - }); - }, []); - - return ( - - -
- {activeTab === "overview" && } - {isLevelTab(activeTab) && ( - - )} -
-
- ); -} diff --git a/frontend/src/views/MathLandingView.tsx b/frontend/src/views/MathLandingView.tsx deleted file mode 100644 index d9026e6..0000000 --- a/frontend/src/views/MathLandingView.tsx +++ /dev/null @@ -1,148 +0,0 @@ -import { useNavigate } from "@tanstack/react-router"; -import { motion } from "framer-motion"; -import { ChevronRight } from "lucide-react"; - -import { PremiumHero, PremiumPage } from "../components/layout/PremiumShell"; -import { useDocumentTitle } from "../hooks/useDocumentTitle"; - -/* ------------------------------------------------------------------ */ -/* Track topic data */ -/* ------------------------------------------------------------------ */ - -const ZERO_TO_ONE_TOPICS = [ - { tab: "methode", label: "Methode" }, - { tab: "diagnostic", label: "Diagnostic" }, - { tab: "college", label: "College (6e-3e)" }, - { tab: "lycee", label: "Seconde-Premiere" }, - { tab: "terminale", label: "Terminale S" }, - { tab: "evaluation", label: "Auto-evaluation" }, -] as const; - -const PREPA_ML_TOPICS = [ - { tab: "methode", label: "Methode" }, - { tab: "linear_algebra", label: "Algebre Lineaire" }, - { tab: "analysis", label: "Analyse" }, - { tab: "probability", label: "Probabilites & Stats" }, - { tab: "applied_ml", label: "Maths Appliquees ML" }, - { tab: "geometry_3d", label: "Geometrie & 3D" }, - { tab: "dynamics_physics", label: "Dynamique & Physique" }, - { tab: "evaluation", label: "Auto-evaluation" }, -] as const; - -const TRACKS = [ - { - key: "zero_to_one", - title: "Maths - Zero to One", - description: - "French curriculum from arithmetic through Terminale S - build fluency from scratch.", - color: "#55cdff", - topics: ZERO_TO_ONE_TOPICS, - }, - { - key: "prepa_ml", - title: "Maths - Prepa ML", - description: - "University-level math for machine learning - linear algebra, calculus, probability, and applied methods.", - color: "#ffc47c", - topics: PREPA_ML_TOPICS, - }, -] as const; - -/* 
------------------------------------------------------------------ */ -/* Component */ -/* ------------------------------------------------------------------ */ - -export function MathLandingView() { - useDocumentTitle("Math"); - const navigate = useNavigate(); - - return ( - - - - {/* Curriculum tracks */} -
- {TRACKS.map((track, ti) => ( - - - -
- {track.topics.map((topic, i) => ( - - navigate({ - to: "/math/refresh", - search: { track: track.key, tab: topic.tab }, - }) - } - className="rounded-[4px] border border-white/[0.08] bg-white/[0.04] px-3 py-1.5 text-[15px] text-[#d0d6e0] transition-colors duration-150 hover:border-white/[0.15] hover:bg-white/[0.07] hover:text-[#f7f8f8]" - > - {topic.label} - - ))} -
-
- ))} -
- - {/* Bridge program */} - navigate({ to: "/math/bridge" })} - className="mt-6 w-full text-left rounded-[12px] border border-white/[0.08] bg-white/[0.02] p-6 transition-colors duration-150 hover:border-white/[0.12] hover:bg-white/[0.035]" - style={{ borderLeftWidth: 2, borderLeftColor: "#5e6ad2" }} - > -

- Math Bridge Program -

-

- Test your skills across four levels, track your readiness, and close - gaps with interactive micro-checks. -

-
-
- ); -} diff --git a/frontend/src/views/MathRefreshView.tsx b/frontend/src/views/MathRefreshView.tsx deleted file mode 100644 index 4698730..0000000 --- a/frontend/src/views/MathRefreshView.tsx +++ /dev/null @@ -1,2437 +0,0 @@ -import { useSearch } from "@tanstack/react-router"; - -import { PremiumHero, PremiumPage } from "../components/layout/PremiumShell"; -import { PracticeTab } from "../components/math-exercise/PracticeTab"; -import { useDocumentTitle } from "../hooks/useDocumentTitle"; - -/* ------------------------------------------------------------------ */ -/* Types */ -/* ------------------------------------------------------------------ */ - -type Track = "zero_to_one" | "prepa_ml"; - -type ZeroToOneTab = - | "methode" - | "diagnostic" - | "college" - | "lycee" - | "terminale" - | "pratique" - | "evaluation"; -type PrepaMlTab = - | "methode" - | "linear_algebra" - | "analysis" - | "probability" - | "applied_ml" - | "geometry_3d" - | "dynamics_physics" - | "pratique" - | "evaluation"; -type TabKey = ZeroToOneTab | PrepaMlTab; - -/* ------------------------------------------------------------------ */ -/* Track metadata */ -/* ------------------------------------------------------------------ */ - -const TRACKS: { key: Track; label: string; description: string }[] = [ - { - key: "zero_to_one", - label: "Zero to One", - description: - "From arithmetic fundamentals through the full French curriculum up to Terminale S - everything needed before prepa-level math.", - }, - { - key: "prepa_ml", - label: "Classe Prepa for ML", - description: - "MPSI/MP-level mathematics reframed for machine learning - linear algebra, analysis, probability, and optimization.", - }, -]; - -/* ------------------------------------------------------------------ */ -/* Tab metadata per track */ -/* ------------------------------------------------------------------ */ - -const ZERO_TO_ONE_TABS: { key: ZeroToOneTab; label: string; color: string }[] = - [ - { key: "methode", label: 
"Methode", color: "#5e6ad2" }, - { key: "diagnostic", label: "Diagnostic", color: "#ffc47c" }, - { key: "college", label: "College (6e-3e)", color: "#55cdff" }, - { key: "lycee", label: "Seconde-Premiere", color: "#f472b6" }, - { key: "terminale", label: "Terminale S", color: "#5bb86e" }, - { key: "pratique", label: "Pratique", color: "#eb5757" }, - { key: "evaluation", label: "Auto-evaluation", color: "#4ade80" }, - ]; - -const PREPA_ML_TABS: { key: PrepaMlTab; label: string; color: string }[] = [ - { key: "methode", label: "Methode", color: "#5e6ad2" }, - { key: "linear_algebra", label: "Algebre Lineaire", color: "#ffc47c" }, - { key: "analysis", label: "Analyse", color: "#55cdff" }, - { key: "probability", label: "Probabilites & Stats", color: "#f472b6" }, - { key: "applied_ml", label: "Maths Appliquees ML", color: "#5bb86e" }, - { key: "geometry_3d", label: "Geometrie & 3D", color: "#eb5757" }, - { key: "dynamics_physics", label: "Dynamique & Physique", color: "#4ade80" }, - { key: "pratique", label: "Pratique", color: "#55cdff" }, - { key: "evaluation", label: "Auto-evaluation", color: "#ffc47c" }, -]; - -function tabs_for_track(track: Track) { - return track === "zero_to_one" ? 
ZERO_TO_ONE_TABS : PREPA_ML_TABS; -} - -// eslint-disable-next-line @typescript-eslint/no-unused-vars -function default_tab(_track: Track): TabKey { - return "methode"; -} - -function is_valid_tab(track: Track, tab: string): tab is TabKey { - return tabs_for_track(track).some((t) => t.key === tab); -} - -/* ------------------------------------------------------------------ */ -/* Enriched data model */ -/* ------------------------------------------------------------------ */ - -interface TopicBlock { - stage: string; - focus: string; - target: string; - checkpoint: string; - concepts: string[]; - exercises: string[]; - mastery_checks: string[]; -} - -interface Resource { - type: "book" | "video" | "site" | "exercice"; - label: string; -} - -interface EnrichedTab { - overview: string; - prerequisites?: string[]; - estimated_weeks?: number; - daily_minutes?: number; - blocks: TopicBlock[]; - resources: Resource[]; - progression_gate: string[]; -} - -/* ------------------------------------------------------------------ */ -/* Data: Zero to One - Diagnostic & Arithmetique */ -/* ------------------------------------------------------------------ */ - -const DIAGNOSTIC_DATA: EnrichedTab = { - overview: - "Identifier ses lacunes et construire la fluence numerique de base. Chaque session cible un point faible precis avec feedback immediat.", - estimated_weeks: 12, - daily_minutes: 60, - blocks: [ - { - stage: "Semaine 0", - focus: "Diagnostic de positionnement", - target: - "Passer un test couvrant : operations de base, fractions, nombres negatifs, pourcentages, mise en equation simple.", - checkpoint: - "Grille de scores par theme. 
File de priorite creee pour chaque sujet en dessous de 70%.", - concepts: [ - "Addition, soustraction, multiplication, division (entiers et decimaux)", - "Fractions : simplification, addition, multiplication", - "Nombres negatifs : regles de signes", - "Pourcentages : calcul, conversion fraction/decimal", - "Mise en equation d'un probleme simple", - ], - exercises: [ - "Test diagnostic de 50 questions (10 par theme) en 45 minutes", - "Identifier les 3 themes les plus faibles pour priorisation", - "Chronometrer chaque section separement pour reperer la lenteur", - ], - mastery_checks: [ - "Je peux noter mes scores par theme et identifier les lacunes", - "Je peux classer mes sujets faibles par ordre de priorite", - ], - }, - { - stage: "Semaines 1-4", - focus: "Fluence numerique", - target: - "Operations sur entiers, priorite des operations, fractions avancees, decimaux, pourcentages.", - checkpoint: "Deux quiz consecutifs a 80% ou plus.", - concepts: [ - "Priorite des operations (PEMDAS) avec parentheses imbriquees", - "Fractions : PGCD, PPCM, addition avec denominateurs differents", - "Conversions : fraction <-> decimal <-> pourcentage", - "Calcul mental : tables de multiplication jusqu'a 15x15", - "Puissances de 2 (jusqu'a 2^12), puissances de 10", - "Estimation et ordres de grandeur", - ], - exercises: [ - "10 min de calcul mental rapide chaque matin (chronometre)", - "20 exercices de fractions par jour (difficulte croissante)", - "Convertir 20 nombres entre fraction/decimal/pourcentage", - "Quiz hebdomadaire de 30 questions en 20 minutes", - ], - mastery_checks: [ - "Je calcule 7/12 + 5/8 sans hesitation", - "Je convertis 0.375 en fraction simplifiee en moins de 10s", - "Je resous 3 + 2 x (4 - 1)^2 sans erreur de priorite", - "Je connais les tables de multiplication jusqu'a 12x12 instantanement", - ], - }, - { - stage: "Semaines 5-8", - focus: "Traduction de problemes", - target: - "Rapports, proportions, vitesses, et decomposition de problemes multi-etapes.", - 
checkpoint: "Convertir un enonce texte en equation sans indice.", - concepts: [ - "Produit en croix et proportionnalite", - "Rapports et echelles", - "Problemes de vitesse : distance = vitesse x temps", - "Problemes de melange et de pourcentage applique", - "Decomposition d'un probleme en sous-etapes", - "Verification par estimation avant calcul exact", - ], - exercises: [ - "Traduire 5 enonces texte par jour en equations", - "Resoudre 3 problemes de proportionnalite (recettes, echelles, vitesses)", - "Probleme du jour : un probleme multi-etapes complet", - "Exercices de verification : estimer la reponse avant de calculer", - ], - mastery_checks: [ - "Je peux lire un probleme et ecrire l'equation correspondante", - "Je sais decomposer un probleme complexe en 3-4 etapes simples", - "Je verifie mes reponses par estimation systematiquement", - "Je resous un probleme de vitesse/distance/temps sans formule affichee", - ], - }, - { - stage: "Semaines 9-12", - focus: "Pre-algebre et graphes", - target: - "Equations du premier degre, inegalites, lecture de graphes, notion de pente.", - checkpoint: "Test final mixte a 85% global, aucun theme sous 75%.", - concepts: [ - "Resolution d'equations du type ax + b = c", - "Inegalites et representation sur une droite", - "Repere cartesien : placer des points, lire des coordonnees", - "Notion de pente et ordonnee a l'origine", - "Lecture et interpretation de graphiques", - "Introduction aux fonctions : notion d'image et d'antecedent", - ], - exercises: [ - "Resoudre 15 equations du premier degre par jour", - "Tracer 5 droites a partir de leur equation y = ax + b", - "Lire et interpreter 3 graphiques varies (courbes, histogrammes)", - "Test cumulatif de fin de phase : 60 questions en 50 minutes", - ], - mastery_checks: [ - "Je resous ax + b = c pour tout a, b, c en moins de 30 secondes", - "Je trace y = 2x - 3 sans tableau de valeurs", - "Je lis les coordonnees d'un point et la pente d'une droite sur un graphe", - "Je passe le test 
final mixte a 85% minimum", - ], - }, - ], - resources: [ - { - type: "site", - label: "Khan Academy (francais) - Arithmetique et pre-algebre", - }, - { - type: "video", - label: "Yvan Monka (maths-et-tiques.fr) - Cours de college", - }, - { - type: "exercice", - label: "Mathenpoche (Sesamath) - Exercices interactifs", - }, - { - type: "book", - label: "Cahier Sesamath 6e/5e/4e/3e - Exercices progressifs", - }, - { type: "site", label: "Kwyk.fr - Exercices adaptatifs avec correction" }, - ], - progression_gate: [ - "Test final mixte a 85% global, aucun theme sous 75%", - "Calcul mental fluide : 20 operations en 2 minutes", - "Capacite a traduire un enonce en equation sans aide", - "Lecture de graphiques et notion de fonction acquises", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Zero to One - College (6e-3e) */ -/* ------------------------------------------------------------------ */ - -const COLLEGE_DATA: EnrichedTab = { - overview: - "Construire les fondations algebriques, geometriques et statistiques du programme de college. 
Objectif : niveau brevet mention Bien en maths.", - prerequisites: [ - "Test diagnostique valide a 85%+ (phase precedente)", - "Calcul mental fluide sur les 4 operations", - "Fractions et pourcentages maitrises", - ], - estimated_weeks: 16, - daily_minutes: 75, - blocks: [ - { - stage: "6e-5e", - focus: "Nombres et calcul litteral", - target: - "Nombres relatifs, priorite des operations avancee, initiation au calcul litteral, fractions avancees.", - checkpoint: "Resoudre 20 calculs mixtes en moins de 15 minutes a 90%.", - concepts: [ - "Nombres relatifs : addition, soustraction, multiplication", - "Calcul litteral : simplification d'expressions, substitution", - "Fractions : operations avec nombres relatifs", - "Puissances : regles de calcul (a^n x a^m, (a^n)^m)", - "Divisibilite : criteres, nombres premiers, decomposition", - "Ecriture scientifique", - ], - exercises: [ - "20 calculs avec nombres relatifs par jour (chronometre)", - "Simplifier 10 expressions litterales (developper, reduire)", - "Decomposer 10 nombres en produit de facteurs premiers", - "Quiz mixte hebdomadaire de 25 questions en 20 min", - ], - mastery_checks: [ - "Je calcule (-3) x (-7) + 4 x (-2) instantanement", - "Je simplifie 3a + 2b - a + 5b sans erreur", - "Je decompose 360 en facteurs premiers en moins de 30s", - "Je convertis en ecriture scientifique sans hesitation", - ], - }, - { - stage: "4e", - focus: "Algebre et geometrie", - target: - "Developper et factoriser, Pythagore, Thales, puissances, notion de fonction.", - checkpoint: "Demontrer un resultat geometrique avec redaction complete.", - concepts: [ - "Identites remarquables : (a+b)^2, (a-b)^2, a^2-b^2", - "Factorisation : mise en facteur, identites remarquables", - "Theoreme de Pythagore : direct et reciproque", - "Theoreme de Thales : direct et reciproque", - "Cosinus d'un angle aigu dans un triangle rectangle", - "Notion de fonction : image, antecedent, tableau de valeurs", - "Translations et rotations", - ], - exercises: [ - 
"Developper et factoriser 15 expressions par jour", - "5 exercices de Pythagore par jour (calcul + demonstration)", - "3 exercices de Thales (avec figure a tracer)", - "Rediger 2 demonstrations geometriques completes par semaine", - "Construire le tableau de valeurs de 3 fonctions et tracer", - ], - mastery_checks: [ - "Je factorise x^2 - 9 et 4x^2 + 12x + 9 en moins de 15s", - "Je determine si un triangle est rectangle avec Pythagore", - "Je calcule une longueur avec Thales en redigeant proprement", - "Je redige une demonstration avec hypothese -> calcul -> conclusion", - ], - }, - { - stage: "3e", - focus: "Fonctions et statistiques", - target: - "Fonctions lineaires et affines, systemes d'equations, probabilites simples, statistiques descriptives.", - checkpoint: "Brevet blanc a 16/20 ou plus en mathematiques.", - concepts: [ - "Fonctions lineaires : f(x) = ax, proportionnalite", - "Fonctions affines : f(x) = ax + b, representation graphique", - "Systemes de 2 equations a 2 inconnues (substitution, combinaison)", - "Probabilites : experience aleatoire, equiprobabilite, arbre", - "Statistiques : moyenne, mediane, etendue, quartiles", - "Notion de racine carree, equations x^2 = a", - "Trigonometrie : sin, cos, tan dans le triangle rectangle", - ], - exercises: [ - "Resoudre 5 systemes d'equations par jour (2 methodes)", - "Tracer 3 fonctions affines et determiner intersection", - "5 problemes de probabilites avec arbre", - "Calculer moyenne, mediane, quartiles sur 3 series de donnees", - "Un brevet blanc complet chaque quinzaine", - ], - mastery_checks: [ - "Je resous un systeme 2x2 par substitution ET combinaison", - "Je determine la fonction affine passant par 2 points donnes", - "Je calcule P(A) avec un arbre de probabilites sans erreur", - "Je calcule moyenne et mediane d'une serie statistique groupee", - "Je resous un probleme de trigonometrie (sin/cos/tan) complet", - ], - }, - { - stage: "Transversal", - focus: "Redaction et rigueur mathematique", - target: 
- "Structurer un raisonnement, ecrire une demonstration, maitriser la notation mathematique.", - checkpoint: - "Chaque solution redigee suit le format hypothese -> calcul -> conclusion.", - concepts: [ - "Structure d'une demonstration : hypotheses, deductions, conclusion", - "Notation ensembliste de base : appartenance, inclusion", - "Quantificateurs implicites : pour tout, il existe", - "Raisonnement par l'absurde (introduction)", - "Contre-exemple pour infirmer une conjecture", - "Presentation propre d'un calcul en colonnes alignees", - ], - exercises: [ - "Rediger 2 demonstrations completes par semaine", - "Corriger 3 redactions volontairement mal ecrites (trouver les erreurs)", - "Ecrire un contre-exemple pour 5 affirmations fausses", - "Recopier proprement 3 solutions en soignant la presentation", - ], - mastery_checks: [ - "Mes solutions suivent toujours hypothese -> calcul -> conclusion", - "Je sais ecrire une demonstration de Pythagore complete et rigoureuse", - "Je peux donner un contre-exemple a 'tout nombre pair est divisible par 4'", - "Ma notation mathematique est correcte et lisible", - ], - }, - ], - resources: [ - { - type: "video", - label: "Yvan Monka (maths-et-tiques.fr) - Tout le programme college", - }, - { type: "site", label: "Sesamath - Manuels et exercices gratuits" }, - { type: "exercice", label: "Annales du brevet - Sujets corriges (APMEP)" }, - { type: "book", label: "Transmath 3e (Nathan) - Manuel de reference" }, - { type: "site", label: "Labomep - Exercices interactifs Sesamath" }, - { type: "video", label: "Jaicompris.com - Exercices corriges en video" }, - ], - progression_gate: [ - "Brevet blanc a 16/20 minimum", - "Systemes d'equations resolus sans erreur", - "Demonstrations geometriques redigees proprement", - "Trigonometrie de base (sin/cos/tan) maitrisee", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Zero to One - Seconde-Premiere S */ -/* 
------------------------------------------------------------------ */ - -const LYCEE_DATA: EnrichedTab = { - overview: - "Maitriser les fonctions, la derivation, la trigonometrie et les suites. Socle indispensable avant la Terminale S.", - prerequisites: [ - "Brevet blanc valide a 16/20+", - "Identites remarquables et factorisation fluides", - "Systemes d'equations et fonctions affines maitrises", - "Trigonometrie dans le triangle rectangle acquise", - ], - estimated_weeks: 20, - daily_minutes: 90, - blocks: [ - { - stage: "Seconde", - focus: "Fonctions de reference et ensembles", - target: - "Fonctions de reference (carree, inverse, racine), intervalles, inequations, vecteurs du plan.", - checkpoint: - "Tracer et analyser toute fonction de reference sans calculatrice.", - concepts: [ - "Intervalles de R, reunion, intersection", - "Fonction carree : parabole, variations, signe", - "Fonction inverse : hyperbole, asymptotes, variations", - "Fonction racine carree : domaine, variations", - "Valeur absolue : definition, distance", - "Inequations : resolution graphique et algebrique", - "Vecteurs : addition, multiplication par un scalaire, colinearite", - "Equation de droite : vecteur directeur, coefficient directeur", - ], - exercises: [ - "Tracer les 3 fonctions de reference de memoire, avec tableau de variations", - "Resoudre 10 inequations par jour (graphique + algebrique)", - "5 exercices de vecteurs par jour (construction + calcul)", - "Determiner l'equation d'une droite de 5 facons differentes", - "Composition de fonctions : domaine et variations (5/jour)", - ], - mastery_checks: [ - "Je trace f(x) = 1/x avec asymptotes, variations, et signes de memoire", - "Je resous |2x - 3| < 5 algebriquement et graphiquement", - "Je calcule AB comme vecteur et je verifie la colinearite avec CD", - "Je determine l'equation de la droite passant par A et B en 30s", - ], - }, - { - stage: "Seconde", - focus: "Statistiques et probabilites", - target: - "Moyenne, mediane, 
ecart-type, simulations, probabilites conditionnelles basiques.", - checkpoint: - "Interpreter un jeu de donnees et calculer ses indicateurs sans hesitation.", - concepts: [ - "Variance et ecart-type : calcul et interpretation", - "Diagrammes en boite (box plots) : construction et lecture", - "Echantillonnage : fluctuation d'echantillonnage", - "Probabilites : loi de probabilite, esperance", - "Probabilites conditionnelles : introduction, arbre pondere", - "Simulation : loi des grands nombres (intuition)", - ], - exercises: [ - "Calculer moyenne, variance, ecart-type sur 3 series par jour", - "Construire et interpreter 2 diagrammes en boite", - "3 problemes de probabilites conditionnelles avec arbre pondere", - "Simuler 100 lancers de de (tableur ou Python) et comparer a la theorie", - ], - mastery_checks: [ - "Je calcule l'ecart-type d'une serie a la main sans erreur", - "Je construis un diagramme en boite et je compare deux series", - "Je calcule P(A|B) avec un arbre pondere correctement", - "Je sais ce que mesure l'ecart-type intuitivement", - ], - }, - { - stage: "Premiere S", - focus: "Derivation et suites", - target: - "Nombre derive, tangente, tableau de variations, suites arithmetiques et geometriques.", - checkpoint: - "Etude complete d'une fonction polynome degre 3 (derivee, variations, extremums).", - concepts: [ - "Taux de variation et nombre derive", - "Derivees des fonctions de reference : x^n, 1/x, racine de x", - "Regles de derivation : somme, produit, quotient, composee", - "Tangente a une courbe en un point", - "Tableau de variations a partir du signe de f'", - "Extremums locaux et globaux", - "Suites arithmetiques : terme general, somme des n premiers termes", - "Suites geometriques : terme general, somme, convergence", - "Raisonnement par recurrence (introduction)", - ], - exercises: [ - "Deriver 20 fonctions par jour (difficulte croissante)", - "Etude complete d'une fonction : derivee -> signe -> variations -> extremums", - "Calculer l'equation 
de la tangente en 5 points differents", - "10 exercices de suites par jour (terme general, somme, sens de variation)", - "Une preuve par recurrence par semaine", - ], - mastery_checks: [ - "Je derive f(x) = (2x+1)/(x^2-3) sans erreur en moins d'une minute", - "Je fais l'etude complete d'un polynome de degre 3 en autonomie", - "Je calcule la somme des 100 premiers termes d'une suite arithmetique", - "Je determine la convergence d'une suite geometrique de raison q", - "Je redige une preuve par recurrence complete", - ], - }, - { - stage: "Premiere S", - focus: "Trigonometrie et produit scalaire", - target: - "Cercle trigonometrique, formules d'addition, produit scalaire, applications geometriques.", - checkpoint: - "Resoudre cos(x) = k et sin(x) = k sur [0, 2pi] sans formule affichee.", - concepts: [ - "Cercle trigonometrique : mesure d'angles en radians", - "Valeurs remarquables : sin et cos de 0, pi/6, pi/4, pi/3, pi/2", - "Formules d'addition : cos(a+b), sin(a+b)", - "Formules de duplication : cos(2a), sin(2a)", - "Equations trigonometriques : cos(x) = k, sin(x) = k", - "Produit scalaire : definition, proprietes, calcul", - "Applications : orthogonalite, projection, distance", - "Formule d'Al-Kashi", - ], - exercises: [ - "Placer 20 angles sur le cercle trigonometrique de memoire", - "Calculer sin(7pi/12) en utilisant les formules d'addition", - "Resoudre 5 equations trigonometriques par jour", - "5 exercices de produit scalaire (calcul + applications geometriques)", - "Demontrer l'orthogonalite de deux droites avec le produit scalaire", - ], - mastery_checks: [ - "Je connais les valeurs de sin et cos pour les angles remarquables par coeur", - "Je resous cos(x) = 1/2 sur [0, 2pi] en moins de 30s", - "Je calcule le produit scalaire de deux vecteurs de deux facons differentes", - "J'utilise la formule d'Al-Kashi pour calculer un cote ou un angle", - ], - }, - ], - resources: [ - { - type: "video", - label: "Yvan Monka (maths-et-tiques.fr) - Cours Seconde & 
Premiere", - }, - { - type: "book", - label: "Declic Mathematiques 1re S (Hachette) - Manuel + exercices", - }, - { type: "site", label: "Annabac.com - Sujets corriges par chapitre" }, - { - type: "video", - label: "3Blue1Brown (Essence of Calculus) - Intuition derivation", - }, - { type: "exercice", label: "XMaths.free.fr - Exercices corriges lycee" }, - { - type: "site", - label: "Geogebra - Visualisation interactive des fonctions", - }, - ], - progression_gate: [ - "Etude complete d'une fonction polynome degre 3 en autonomie", - "Derivation fluide (toutes regles) sans notes", - "Equations trigonometriques resolues sur [0, 2pi]", - "Suites : terme general, somme, convergence maitrises", - "Preuve par recurrence simple redigee correctement", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Zero to One - Terminale S */ -/* ------------------------------------------------------------------ */ - -const TERMINALE_DATA: EnrichedTab = { - overview: - "Derniere marche avant la prepa. 
Maitriser l'integration, l'exponentielle, le logarithme, les nombres complexes et les lois continues.", - prerequisites: [ - "Derivation fluide (toutes regles)", - "Trigonometrie et produit scalaire maitrises", - "Suites arithmetiques et geometriques acquises", - "Preuve par recurrence compris", - ], - estimated_weeks: 24, - daily_minutes: 90, - blocks: [ - { - stage: "Analyse", - focus: "Limites et continuite", - target: - "Limites de suites et de fonctions, formes indeterminees, theoreme des valeurs intermediaires.", - checkpoint: - "Calculer toute limite classique et lever les formes indeterminees standard.", - concepts: [ - "Limite d'une suite : definition, operations, formes indeterminees", - "Limite d'une fonction en un point et en l'infini", - "Formes indeterminees : 0/0, inf/inf, inf-inf, 0 x inf", - "Asymptotes horizontales, verticales, obliques", - "Continuite : definition, TVI (theoreme des valeurs intermediaires)", - "Theoreme de comparaison et theoreme des gendarmes", - "Suites adjacentes", - ], - exercises: [ - "Calculer 20 limites par jour (difficulte progressive)", - "Lever 10 formes indeterminees (factorisation, conjugue, taux de variation)", - "Determiner les asymptotes de 5 fonctions rationnelles", - "Appliquer le TVI pour prouver l'existence de solutions", - "3 exercices de theoreme des gendarmes par semaine", - ], - mastery_checks: [ - "Je leve la forme 0/0 par factorisation ou quantite conjuguee", - "Je determine toutes les asymptotes d'une fonction rationnelle", - "J'applique le TVI pour montrer qu'une equation a une solution", - "Je calcule la limite d'une suite definie par recurrence", - ], - }, - { - stage: "Analyse", - focus: "Integration", - target: - "Primitive, integrale sur un intervalle, calcul d'aires, integration par parties.", - checkpoint: - "Calculer l'aire entre deux courbes et justifier chaque etape.", - concepts: [ - "Primitive d'une fonction : definition, primitives de reference", - "Integrale de a a b : interpretation 
geometrique (aire signee)", - "Proprietes : linearite, relation de Chasles, positivite", - "Valeur moyenne d'une fonction", - "Integration par parties", - "Aire entre deux courbes", - "Integrales et suites (suites d'integrales)", - ], - exercises: [ - "Trouver la primitive de 15 fonctions par jour", - "Calculer 10 integrales definies (avec verification par aire)", - "3 calculs d'aire entre deux courbes par jour", - "5 integrations par parties par semaine (u'v = uv - integrale de uv')", - "Exercices de suites d'integrales : calculer I_n en fonction de I_{n-1}", - ], - mastery_checks: [ - "Je trouve la primitive de sin, cos, e^x, 1/x, x^n instantanement", - "Je calcule l'aire entre f et g sur [a,b] avec la bonne formule", - "Je fais une integration par parties sans erreur de signe", - "Je calcule la valeur moyenne d'une fonction sur un intervalle", - ], - }, - { - stage: "Analyse", - focus: "Exponentielle et logarithme", - target: - "Fonction exponentielle, fonction ln, croissances comparees, equations differentielles y' = ay.", - checkpoint: - "Resoudre un probleme type bac melant exp, ln, et derivation en autonomie.", - concepts: [ - "Fonction exponentielle : proprietes algebriques, derivee, limites", - "Fonction logarithme neperien : proprietes, derivee, limites", - "Croissances comparees : x^n vs e^x, ln(x) vs x^a", - "Equation differentielle y' = ay : solution y = Ce^(ax)", - "Equations et inequations avec exp et ln", - "Etude complete de fonctions du type f(x) = xe^(-x), f(x) = ln(x)/x", - "Composees : derivee de e^(u(x)), ln(u(x))", - ], - exercises: [ - "Deriver 10 fonctions contenant exp ou ln par jour", - "Etude complete de f(x) = (x-1)e^(-x) : limites, derivee, variations, courbe", - "Resoudre 5 equations avec exp/ln par jour", - "3 problemes de croissances comparees", - "Resoudre y' = 2y + 3 par changement de variable (2/semaine)", - ], - mastery_checks: [ - "Je sais que (e^x)' = e^x et (ln x)' = 1/x sans reflechir", - "Je determine lim x->+inf x^n / e^x 
= 0 et j'explique pourquoi", - "Je resous e^(2x) - 3e^x + 2 = 0 par substitution", - "Je fais l'etude complete d'une fonction exp/ln en 20 minutes", - ], - }, - { - stage: "Algebre & Proba", - focus: "Nombres complexes et lois continues", - target: - "Forme algebrique et trigonometrique, loi normale, intervalle de confiance, loi exponentielle.", - checkpoint: "Epreuve type bac complete en 4 heures a 14/20 minimum.", - concepts: [ - "Nombres complexes : forme algebrique a + ib, module, argument", - "Forme trigonometrique et exponentielle : z = r.e^(i.theta)", - "Operations : addition, multiplication, conjugue, division", - "Interpretation geometrique : rotation, homothetie", - "Loi normale : proprietes, table, intervalle de confiance", - "Loi exponentielle : densite, esperance, absence de memoire", - "Intervalle de confiance au seuil de 95%", - "Prise de decision et tests d'hypotheses (introduction)", - ], - exercises: [ - "10 calculs avec nombres complexes par jour (4 operations)", - "Passer de la forme algebrique a la forme exponentielle (10/jour)", - "Interpreter geometriquement 5 multiplications par un complexe", - "5 exercices de loi normale avec table", - "Construire 3 intervalles de confiance a 95%", - "Un sujet type bac complet (4h) chaque quinzaine", - ], - mastery_checks: [ - "Je passe de a+ib a r.e^(i.theta) sans hesitation", - "Je multiplie deux complexes en forme exponentielle", - "J'interprete z' = e^(i.pi/3).z comme une rotation de 60 degres", - "Je calcule P(mu-2sigma < X < mu+2sigma) pour X normale", - "Je construis un intervalle de confiance et j'interprete le resultat", - ], - }, - ], - resources: [ - { - type: "book", - label: "Annales Bac S Mathematiques (APMEP) - Sujets corriges", - }, - { type: "video", label: "Yvan Monka - Tout le programme Terminale S" }, - { - type: "video", - label: "3Blue1Brown (Essence of Linear Algebra) - Intro complexes", - }, - { type: "site", label: "Bibmath.net - Fiches de cours et exercices" }, - { - type: 
"exercice", - label: "Bacamaths.net - Exercices corriges par chapitre", - }, - { type: "site", label: "Wolfram Alpha - Verification de calculs" }, - ], - progression_gate: [ - "Epreuve type bac complete en 4h a 14/20 minimum", - "Aucun chapitre en dessous de 12/20", - "Resolution autonome de problemes de synthese multi-chapitres", - "Calcul fluide sans calculatrice sur les operations standard", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Prepa ML - Algebre Lineaire */ -/* ------------------------------------------------------------------ */ - -const LINEAR_ALGEBRA_DATA: EnrichedTab = { - overview: - "De l'espace vectoriel abstrait jusqu'a la SVD et la PCA. Chaque theoreme doit pouvoir se traduire en code.", - prerequisites: [ - "Programme complet de Terminale S maitrise", - "Systemes d'equations et matrices (bases)", - "Calcul avec nombres complexes", - ], - estimated_weeks: 12, - daily_minutes: 120, - blocks: [ - { - stage: "Fondations", - focus: "Espaces vectoriels", - target: - "Sous-espaces, familles libres/liees/generatrices, bases, dimension, somme directe.", - checkpoint: - "Determiner une base d'un sous-espace de R^n defini par un systeme.", - concepts: [ - "Espace vectoriel : axiomes, exemples (R^n, polynomes, matrices)", - "Sous-espace vectoriel : definition, intersection, somme", - "Famille libre, famille liee : definition et criteres", - "Famille generatrice, base, dimension", - "Theoreme de la base incomplite", - "Somme directe de sous-espaces : definition et critere", - "Sous-espaces supplementaires", - "Rang d'une famille de vecteurs", - ], - exercises: [ - "Verifier qu'un ensemble est un sous-espace vectoriel (5/jour)", - "Determiner si une famille est libre ou liee (extraire une base)", - "Trouver une base et la dimension de ker(A) pour 5 matrices", - "Prouver que deux sous-espaces sont supplementaires", - "Implementer la verification de liberte en Python/NumPy", - ], - mastery_checks: [ - "Je 
verifie les 3 conditions de sous-espace vectoriel sans notes", - "Je determine une base d'un sous-espace defini par des equations", - "Je calcule la dimension d'un espace defini par une famille generatrice", - "Je sais que dim(E+F) = dim(E) + dim(F) - dim(E inter F)", - ], - }, - { - stage: "Matrices", - focus: "Calcul matriciel et applications lineaires", - target: - "Produit matriciel, rang, noyau, image, matrice de passage, changement de base.", - checkpoint: - "Exprimer une application lineaire dans deux bases et verifier la coherence.", - concepts: [ - "Produit matriciel : definition, proprietes (non commutativite)", - "Matrice inversible : definition, calcul de l'inverse, criteres", - "Application lineaire : definition, noyau, image", - "Theoreme du rang : dim(E) = dim(ker f) + dim(im f)", - "Matrice d'une application lineaire dans des bases donnees", - "Matrice de passage et formule de changement de base", - "Systemes lineaires : methode de Gauss, pivot", - "Rang d'une matrice : pivots, mineurs", - ], - exercises: [ - "Calculer 10 produits matriciels par jour (2x2, 3x3)", - "Inverser 5 matrices par la methode de Gauss", - "Pour 3 applications lineaires : calculer noyau, image, rang", - "Effectuer 3 changements de base complets", - "Resoudre 5 systemes lineaires par pivot de Gauss", - "Implementer la multiplication matricielle from scratch en Python", - ], - mastery_checks: [ - "Je multiplie deux matrices 3x3 sans erreur en 3 minutes", - "J'inverse une matrice 3x3 par augmentation [A|I]", - "J'applique le theoreme du rang pour deduire la surjectivite", - "Je change de base : M' = P^(-1).M.P sans me tromper", - ], - }, - { - stage: "Reduction", - focus: "Diagonalisation et trigonalisation", - target: - "Valeurs propres, vecteurs propres, polynome caracteristique, diagonalisation, Cayley-Hamilton.", - checkpoint: "Diagonaliser une matrice 3x3 et calculer A^n explicitement.", - concepts: [ - "Valeur propre, vecteur propre, sous-espace propre", - "Polynome 
caracteristique : det(A - lambda.I)", - "Critere de diagonalisabilite : somme des dim(E_lambda) = n", - "Matrice diagonale : A = P.D.P^(-1), calcul de A^n", - "Trigonalisation (theoreme de Schur)", - "Theoreme de Cayley-Hamilton", - "Matrices symetriques : diagonalisation en base orthonormee", - "Matrices positives, semi-definies positives", - ], - exercises: [ - "Diagonaliser 3 matrices par jour (2x2 puis 3x3)", - "Calculer A^n pour 5 matrices diagonalisables", - "Verifier Cayley-Hamilton sur 5 matrices", - "Diagonaliser en base orthonormee une matrice symetrique", - "Implementer la diagonalisation en Python et comparer a numpy.linalg.eig", - ], - mastery_checks: [ - "Je calcule le polynome caracteristique d'une matrice 3x3", - "Je diagonalise une matrice et je calcule A^100", - "Je sais pourquoi une matrice symetrique est toujours diagonalisable", - "Je verifie le theoreme de Cayley-Hamilton sur un exemple", - ], - }, - { - stage: "ML Link", - focus: "SVD, projections, moindres carres", - target: - "Decomposition en valeurs singulieres, projection orthogonale, pseudo-inverse, PCA comme rotation.", - checkpoint: - "Implementer une PCA from scratch et interpreter les composantes principales.", - concepts: [ - "Produit scalaire, norme, orthogonalite dans R^n", - "Projection orthogonale sur un sous-espace", - "Moindres carres : formulation et solution (A^T.A)^(-1).A^T.b", - "Decomposition en valeurs singulieres (SVD) : A = U.Sigma.V^T", - "Pseudo-inverse de Moore-Penrose", - "PCA : matrice de covariance, valeurs propres, composantes principales", - "Interpretation geometrique de la SVD : rotation + echelle + rotation", - "Application : compression d'images, reduction de dimension", - ], - exercises: [ - "Calculer la projection orthogonale sur un plan dans R^3", - "Resoudre un probleme de moindres carres (regression lineaire)", - "Calculer la SVD d'une matrice 2x3 a la main", - "Implementer PCA from scratch (NumPy only) sur un dataset", - "Comparer votre PCA a 
sklearn.decomposition.PCA", - "Visualiser les composantes principales sur un dataset 2D", - ], - mastery_checks: [ - "Je calcule la projection orthogonale de b sur im(A)", - "Je resous min ||Ax - b||^2 par la formule des moindres carres", - "J'explique ce que sont U, Sigma, V dans A = U.Sigma.V^T", - "J'implemente PCA en 20 lignes de NumPy et j'interprete les resultats", - "Je sais pourquoi les valeurs singulieres indiquent l'importance des composantes", - ], - }, - ], - resources: [ - { type: "book", label: "Tout-en-un MPSI (Dunod) - Algebre lineaire" }, - { - type: "video", - label: "3Blue1Brown - Essence of Linear Algebra (17 episodes)", - }, - { - type: "book", - label: "Introduction to Linear Algebra - Gilbert Strang (MIT)", - }, - { type: "site", label: "Exo7 - Exercices de mathematiques (niveau prepa)" }, - { - type: "video", - label: "MIT 18.06 - Gilbert Strang (cours complet en ligne)", - }, - { - type: "exercice", - label: "Bibmath - Exercices corriges d'algebre lineaire", - }, - { - type: "site", - label: "Immersive Linear Algebra (immersivemath.com) - Visualisations", - }, - ], - progression_gate: [ - "Diagonaliser une matrice 3x3 et calculer A^n", - "Implementer PCA from scratch en NumPy", - "Expliquer la SVD geometriquement", - "Resoudre un probleme de moindres carres complet", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Prepa ML - Analyse */ -/* ------------------------------------------------------------------ */ - -const ANALYSIS_DATA: EnrichedTab = { - overview: - "Series, calcul differentiel multivariable, et optimisation - les outils mathematiques au coeur du machine learning.", - prerequisites: [ - "Integration et derivation maitrisees (Terminale S)", - "Exponentielle et logarithme fluides", - "Limites et continuite acquises", - ], - estimated_weeks: 14, - daily_minutes: 120, - blocks: [ - { - stage: "Suites & Series", - focus: "Convergence et series numeriques", - target: - "Suites 
recurrentes, series a termes positifs, series alternees, convergence absolue, rayon de convergence.", - checkpoint: "Etudier la convergence de 10 series classiques sans notes.", - concepts: [ - "Suites monotones bornees : theoreme de convergence", - "Suites recurrentes : point fixe, convergence", - "Series numeriques : convergence, somme partielle, reste", - "Series a termes positifs : comparaison, equivalent, Riemann", - "Series alternees : critere de Leibniz", - "Convergence absolue vs convergence simple", - "Series entieres : rayon de convergence, somme", - "Developpements en serie entiere : e^x, sin(x), 1/(1-x)", - ], - exercises: [ - "Etudier la convergence de 5 series par jour (criteres varies)", - "Calculer le rayon de convergence de 3 series entieres par jour", - "Determiner le point fixe de 3 suites recurrentes", - "Developper 5 fonctions en serie entiere", - "Comparer sommes partielles numeriquement en Python", - ], - mastery_checks: [ - "Je sais que sum 1/n^2 converge et sum 1/n diverge, et pourquoi", - "Je determine le rayon de convergence par le critere de d'Alembert", - "J'ecris le developpement de e^x, sin(x), ln(1+x) de memoire", - "J'applique le critere de Leibniz sur une serie alternee", - ], - }, - { - stage: "Calcul differentiel", - focus: "Fonctions de plusieurs variables", - target: - "Derivees partielles, gradient, jacobienne, hessienne, developpements de Taylor multivarles.", - checkpoint: - "Calculer le gradient et la hessienne d'une loss function type MSE.", - concepts: [ - "Derivees partielles : definition, calcul, interpretation", - "Gradient : vecteur des derivees partielles, direction de plus forte pente", - "Matrice jacobienne : derivees partielles d'une application vectorielle", - "Matrice hessienne : derivees partielles secondes", - "Differentiabilite et differentielle", - "Formule de Taylor multi-variable (ordre 1 et 2)", - "Chain rule multivariable : differentielle d'une composee", - "Application : gradient de ||Ax - b||^2, 
gradient de cross-entropy", - ], - exercises: [ - "Calculer le gradient de 10 fonctions R^n -> R par jour", - "Calculer la jacobienne de 5 applications R^n -> R^m", - "Calculer la hessienne de 5 fonctions et determiner sa nature", - "Appliquer la chain rule sur des composees a 3 niveaux", - "Calculer le gradient de la MSE loss a la main", - "Verifier vos calculs avec autograd (JAX ou PyTorch)", - ], - mastery_checks: [ - "Je calcule le gradient de f(x,y) = x^2.y + sin(xy) en 30s", - "J'ecris la jacobienne d'une application R^3 -> R^2", - "J'applique la chain rule : d/dx f(g(x)) pour f et g vectorielles", - "Je calcule le gradient de la MSE et de la cross-entropy loss", - ], - }, - { - stage: "Optimisation", - focus: "Convexite et descente de gradient", - target: - "Fonctions convexes, conditions KKT, multiplicateurs de Lagrange, gradient descent, learning rate.", - checkpoint: - "Prouver la convergence du gradient descent sur une fonction L-smooth convexe.", - concepts: [ - "Convexite : definition, critere du second ordre (hessienne >= 0)", - "Minimum global = minimum local pour les fonctions convexes", - "Multiplicateurs de Lagrange : optimisation sous contrainte d'egalite", - "Conditions KKT : optimisation sous contrainte d'inegalite", - "Descente de gradient : algorithme, convergence, choix du learning rate", - "Fonctions L-smooth : gradient Lipschitz, taux de convergence O(1/k)", - "Fonctions mu-fortement convexes : convergence lineaire", - "Dualite de Lagrange (introduction)", - ], - exercises: [ - "Verifier la convexite de 5 fonctions par la hessienne", - "Optimiser 3 fonctions sous contrainte par Lagrange", - "Implementer le gradient descent en Python sur f(x) = ||Ax-b||^2", - "Tracer la courbe de convergence pour differents learning rates", - "Prouver la convergence sur un cas convexe L-smooth", - "Comparer gradient descent, Newton, et gradient descent accelere", - ], - mastery_checks: [ - "Je verifie la convexite par la hessienne semi-definie positive", - 
"Je resous min f(x) sous g(x) = 0 par Lagrange", - "J'implemente gradient descent et je choisis un bon learning rate", - "J'explique pourquoi la convexite garantit un minimum global", - ], - }, - { - stage: "Integration avancee", - focus: "Integrales multiples et mesure", - target: - "Integrales doubles/triples, changement de variables, Fubini, introduction a la mesure de Lebesgue.", - checkpoint: - "Calculer l'esperance d'une loi continue multivariee par integration directe.", - concepts: [ - "Integrales doubles : definition, calcul par Fubini", - "Changement de variables : polaires, spheriques, jacobien", - "Integrales triples et applications (volume, masse)", - "Integrale de Gauss : integrale de e^(-x^2)", - "Introduction a la mesure de Lebesgue", - "Theoremes de convergence dominee et monotone (enonces)", - "Application : esperance et variance de lois multivariees", - ], - exercises: [ - "Calculer 5 integrales doubles par jour (Fubini)", - "Effectuer 3 changements en coordonnees polaires", - "Calculer le volume de solides par integrales triples", - "Retrouver l'integrale de Gauss par le calcul en polaires", - "Calculer E[X] et Var(X) pour une loi a densite bivariee", - ], - mastery_checks: [ - "Je calcule une integrale double en inversant l'ordre d'integration", - "Je passe en coordonnees polaires avec le bon jacobien (r)", - "Je retrouve integrale de e^(-x^2) dx = sqrt(pi) par le calcul", - "Je calcule l'esperance d'une loi bivariee par integration directe", - ], - }, - ], - resources: [ - { type: "book", label: "Tout-en-un MPSI/MP (Dunod) - Analyse" }, - { type: "video", label: "MIT 18.02 Multivariable Calculus - Denis Auroux" }, - { type: "site", label: "Exo7 - Exercices d'analyse (CPGE)" }, - { - type: "book", - label: "Convex Optimization - Boyd & Vandenberghe (Stanford, gratuit)", - }, - { type: "video", label: "3Blue1Brown - Essence of Calculus" }, - { type: "exercice", label: "Les-mathematiques.net - Forum et exercices" }, - ], - progression_gate: [ - 
"Etudier la convergence de series classiques sans notes", - "Calculer gradient et hessienne de fonctions ML courantes", - "Prouver la convergence du gradient descent (cas convexe)", - "Calculer des integrales multiples avec changement de variables", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Prepa ML - Probabilites & Stats */ -/* ------------------------------------------------------------------ */ - -const PROBABILITY_DATA: EnrichedTab = { - overview: - "De l'espace probabilise jusqu'a l'estimation par maximum de vraisemblance - le langage mathematique de l'incertitude et de l'apprentissage.", - prerequisites: [ - "Integration (simple et multiple) maitrisee", - "Series numeriques acquises", - "Denombrement et probabilites de base (Terminale)", - ], - estimated_weeks: 12, - daily_minutes: 120, - blocks: [ - { - stage: "Fondations", - focus: "Espaces probabilises", - target: - "Tribus, probabilites, conditionnement, independance, formule de Bayes, denombrement.", - checkpoint: - "Resoudre un probleme de Bayes multi-etapes avec arbre et calcul formel.", - concepts: [ - "Espace probabilise (Omega, F, P) : definition formelle", - "Tribu : definition, exemples, tribu engendree", - "Probabilite conditionnelle : P(A|B), formule des probabilites totales", - "Independance d'evenements et de tribus", - "Formule de Bayes et inference bayesienne", - "Denombrement : arrangements, combinaisons, formule du binome", - "Principe d'inclusion-exclusion", - ], - exercises: [ - "Resoudre 5 problemes de Bayes par jour (difficulte croissante)", - "Calculer P(A|B) dans 5 contextes differents (medical, spam, etc.)", - "10 exercices de denombrement par jour", - "Modeliser 3 problemes reels en espace probabilise formel", - "Implementer un classificateur bayesien naif en Python", - ], - mastery_checks: [ - "Je formule P(maladie|test+) avec la formule de Bayes complet", - "Je distingue P(A|B) et P(B|A) sans confusion", - "Je calcule C(n,k) et 
j'applique la formule du binome", - "Je verifie l'independance de deux evenements formellement", - ], - }, - { - stage: "Variables aleatoires", - focus: "Lois discretes et continues", - target: - "Esperance, variance, lois classiques, vecteurs aleatoires, fonctions generatrices.", - checkpoint: - "Identifier la loi, calculer E[X] et Var(X) pour tout probleme de modelisation courant.", - concepts: [ - "Variable aleatoire discrete : loi, esperance, variance", - "Lois classiques : Bernoulli, binomiale, Poisson, geometrique", - "Variable aleatoire continue : densite, fonction de repartition", - "Lois continues : uniforme, exponentielle, normale, gamma", - "Vecteurs aleatoires : loi jointe, lois marginales, covariance", - "Independance de variables aleatoires", - "Fonction generatrice des moments (MGF)", - "Inegalites : Markov, Tchebychev, Jensen", - ], - exercises: [ - "Pour 5 problemes par jour : identifier la loi, calculer E[X], Var(X)", - "Calculer la densite de transformations de v.a. continues", - "Calculer la covariance et la correlation de 3 couples de v.a.", - "Verifier les inegalites de Markov et Tchebychev sur des exemples", - "Simuler en Python chaque loi et comparer a la theorie (histogramme vs densite)", - ], - mastery_checks: [ - "Je reconnais une loi binomiale, Poisson, ou exponentielle dans un enonce", - "Je calcule E[X^2] pour deduire Var(X) = E[X^2] - E[X]^2", - "Je calcule la densite de Y = g(X) par la methode du jacobien", - "J'applique l'inegalite de Tchebychev pour borner P(|X-mu| > k.sigma)", - ], - }, - { - stage: "Convergence", - focus: "Loi des grands nombres et TCL", - target: - "Convergence en probabilite, presque sure, en loi. 
LGN faible/forte, theoreme central limite.", - checkpoint: - "Appliquer le TCL pour construire un intervalle de confiance asymptotique.", - concepts: [ - "Convergence en probabilite : definition, exemples", - "Convergence presque sure : definition, lien avec la precedente", - "Convergence en loi : definition, fonction de repartition", - "Loi faible des grands nombres : enonce et demonstration", - "Loi forte des grands nombres : enonce", - "Theoreme central limite (TCL) : enonce, interpretation", - "Application du TCL : approximation normale, intervalles de confiance", - "Delta method (introduction)", - ], - exercises: [ - "Demontrer la LGN faible par Tchebychev", - "Appliquer le TCL pour approximer P(S_n > x) avec la loi normale", - "Construire un intervalle de confiance a 95% pour une proportion", - "Simuler la convergence en loi (visualiser les histogrammes)", - "Comparer convergence en proba vs p.s. sur des exemples numeriques", - ], - mastery_checks: [ - "Je cite la LGN et le TCL avec leurs hypotheses", - "Je construis un IC asymptotique a 95% pour une moyenne", - "Je sais que sqrt(n)(X_bar - mu)/sigma converge en loi vers N(0,1)", - "Je distingue convergence en proba, p.s., et en loi", - ], - }, - { - stage: "Estimation", - focus: "Estimation et tests", - target: - "Estimateurs (biais, consistance, efficacite), maximum de vraisemblance, tests d'hypotheses, p-value.", - checkpoint: - "Deriver le MLE pour une famille parametrique donnee et calculer l'intervalle de confiance.", - concepts: [ - "Statistique, estimateur : definition formelle", - "Biais, variance, erreur quadratique moyenne", - "Estimateur consistant, asymptotiquement normal", - "Maximum de vraisemblance (MLE) : definition, calcul, proprietes", - "Information de Fisher, borne de Cramer-Rao", - "Tests d'hypotheses : H0 vs H1, risques alpha et beta", - "p-value : definition, interpretation, pieges courants", - "Tests classiques : test de Student, chi-deux, test de proportion", - ], - exercises: [ - 
"Calculer le MLE pour 5 familles : Bernoulli, Poisson, Normal, Exponentielle, Uniforme", - "Verifier le biais et la consistance de chaque estimateur", - "Construire un test d'hypothese complet avec region de rejet", - "Calculer la p-value et interpreter le resultat (3 exemples/jour)", - "Implementer le MLE en Python pour une regression logistique simple", - ], - mastery_checks: [ - "Je derive le MLE pour une famille exponentielle", - "Je calcule l'information de Fisher et la borne de Cramer-Rao", - "Je construis un test de Student bilateral complet", - "Je sais que la p-value n'est PAS P(H0 est vraie)", - ], - }, - ], - resources: [ - { type: "book", label: "Tout-en-un MP (Dunod) - Probabilites" }, - { type: "book", label: "All of Statistics - Larry Wasserman (Springer)" }, - { - type: "video", - label: "MIT 6.041 - Introduction to Probability (Tsitsiklis)", - }, - { type: "site", label: "Exo7 - Probabilites et statistiques (CPGE)" }, - { type: "video", label: "StatQuest - Intuitions statistiques en video" }, - { - type: "exercice", - label: "Seeing Theory (Brown) - Visualisations interactives", - }, - ], - progression_gate: [ - "Resoudre un probleme de Bayes multi-etapes", - "Deriver le MLE pour toute famille parametrique classique", - "Appliquer le TCL pour construire un IC asymptotique", - "Construire un test d'hypothese complet avec decision", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Prepa ML - Maths Appliquees ML */ -/* ------------------------------------------------------------------ */ - -const APPLIED_ML_DATA: EnrichedTab = { - overview: - "Relier chaque concept mathematique a son implementation ML : backprop = chain rule, softmax = exp normalise, batch norm = standardisation.", - prerequisites: [ - "Algebre lineaire complete (SVD, PCA)", - "Analyse et optimisation (gradient descent, convexite)", - "Probabilites et estimation (MLE, Bayes)", - ], - estimated_weeks: 10, - daily_minutes: 120, - blocks: [ - 
{ - stage: "Information", - focus: "Theorie de l'information", - target: - "Entropie de Shannon, entropie croisee, divergence KL, information mutuelle, lien avec la cross-entropy loss.", - checkpoint: - "Deriver la loss de classification a partir de la KL divergence.", - concepts: [ - "Entropie de Shannon : H(X) = -sum p(x) log p(x)", - "Entropie croisee : H(p,q) = -sum p(x) log q(x)", - "Divergence KL : D_KL(p||q) = H(p,q) - H(p)", - "Information mutuelle : I(X;Y) = H(X) + H(Y) - H(X,Y)", - "Lien : minimiser la cross-entropy = minimiser la KL", - "Entropie maximale et lien avec la loi normale", - "Application : cross-entropy loss en classification", - ], - exercises: [ - "Calculer H(X) pour 5 distributions discretes", - "Calculer D_KL(p||q) et verifier l'asymetrie", - "Deriver la cross-entropy loss a partir de la KL divergence", - "Montrer que la loi normale maximise l'entropie a variance fixee", - "Implementer le calcul d'entropie et de KL divergence en Python", - ], - mastery_checks: [ - "J'ecris H(X), H(p,q), D_KL de memoire avec les bonnes formules", - "J'explique pourquoi minimiser la cross-entropy = minimiser la KL", - "Je calcule l'entropie d'une distribution Bernoulli en fonction de p", - "Je sais que D_KL >= 0 (inegalite de Gibbs) et je le demontre", - ], - }, - { - stage: "Optimisation ML", - focus: "SGD et variantes", - target: - "SGD, momentum, Adam, learning rate schedules, convergence stochastique, mini-batch.", - checkpoint: - "Implementer Adam from scratch et comparer la convergence vs SGD vanilla sur un toy problem.", - concepts: [ - "SGD : gradient stochastique, variance du gradient, mini-batch", - "Momentum : moyenne mobile du gradient, acceleration", - "RMSProp : adaptation du learning rate par parametre", - "Adam : momentum + RMSProp + correction de biais", - "Learning rate schedules : step decay, cosine annealing, warmup", - "Convergence stochastique : conditions de Robbins-Monro", - "Batch normalization : lien avec la standardisation", - 
"Weight decay vs L2 regularisation (distinction avec Adam)", - ], - exercises: [ - "Implementer SGD, Momentum, RMSProp, Adam from scratch en NumPy", - "Comparer les 4 optimiseurs sur f(x,y) = x^2 + 10.y^2 (rosenbrock simple)", - "Tracer les courbes de convergence pour differents learning rates", - "Implementer cosine annealing et comparer a step decay", - "Analyser l'effet du batch size sur la variance du gradient", - "Montrer que weight decay != L2 reg pour Adam (implementer les deux)", - ], - mastery_checks: [ - "J'ecris les equations de mise a jour d'Adam de memoire", - "J'explique la correction de biais dans Adam (pourquoi m_hat et v_hat)", - "Je choisis un optimiseur et un LR schedule adaptes a un probleme donne", - "J'implemente un optimiseur complet en 20 lignes de NumPy", - ], - }, - { - stage: "Algebre appliquee", - focus: "Noyaux, espaces de Hilbert, regularisation", - target: - "RKHS (intuition), kernel trick, ridge/lasso comme optimisation contrainte, normes L1/L2.", - checkpoint: - "Montrer que la ridge regression a une solution fermee et l'interpreter geometriquement.", - concepts: [ - "Norme L1, L2 : definition, proprietes, comparaison", - "Ridge regression : min ||Ax-b||^2 + lambda.||x||_2^2", - "Solution fermee : x = (A^T.A + lambda.I)^(-1).A^T.b", - "Lasso regression : min ||Ax-b||^2 + lambda.||x||_1 (pas de solution fermee)", - "Interpretation bayesienne : ridge = prior gaussien, lasso = prior laplacien", - "Kernel trick : produit scalaire dans un espace de grande dimension", - "RKHS : intuition, noyau reproduisant, theoreme de representation", - "SVM : marge maximale, noyaux, formulation duale", - ], - exercises: [ - "Deriver la solution fermee de ridge regression", - "Implementer ridge et lasso en NumPy (et comparer a sklearn)", - "Tracer la solution en fonction de lambda (regularization path)", - "Appliquer le kernel trick : transformer x -> phi(x) puis produit scalaire", - "Interpreter geometriquement ridge (cercle) vs lasso (losange)", - 
"Implementer un SVM lineaire simple et visualiser la marge", - ], - mastery_checks: [ - "Je derive la solution de ridge et j'explique le role de lambda.I", - "J'explique pourquoi lasso produit des poids exactement zero", - "J'interprete ridge = regularisation bayesienne avec prior gaussien", - "J'applique le kernel trick pour classifier des donnees non lineaires", - ], - }, - { - stage: "Synthese", - focus: "Du theoreme au code", - target: - "Relier chaque concept mathematique a son implementation ML : backprop = chain rule, softmax = exp normalise, batch norm = standardisation.", - checkpoint: - "Whiteboard d'un forward/backward pass complet avec les maths a chaque etape.", - concepts: [ - "Backpropagation = application recursive de la chain rule", - "Softmax = exponentielle normalisee (lien avec Boltzmann)", - "Batch norm = standardisation (mu=0, sigma=1) par mini-batch", - "Dropout = inference bayesienne approximee (MC Dropout)", - "Attention = produit scalaire normalise (Q.K^T/sqrt(d)).V", - "Cross-entropy loss = - log vraisemblance (lien MLE)", - "Xavier/He init = controle de la variance des activations", - ], - exercises: [ - "Deriver le backward pass d'un reseau 2 couches (relu + softmax + CE loss)", - "Implementer backprop from scratch (NumPy) et comparer a PyTorch autograd", - "Implementer softmax numeriquement stable (soustraire max)", - "Implementer batch norm et verifier le gradient", - "Implementer un transformer block (attention + FFN + layer norm) from scratch", - "Expliquer la Xavier init : pourquoi Var(W) = 1/n ?", - ], - mastery_checks: [ - "Je derive le gradient de la cross-entropy loss apres softmax", - "J'implemente un forward/backward pass complet en NumPy", - "J'explique pourquoi softmax est numeriquement instable et comment le corriger", - "Je relie chaque composant d'un reseau a son fondement mathematique", - "Je dessine le graphe de calcul d'un transformer block avec les dimensions", - ], - }, - ], - resources: [ - { - type: "book", - 
label: - "Deep Learning - Goodfellow, Bengio, Courville (MIT Press, gratuit)", - }, - { - type: "book", - label: - "Mathematics for Machine Learning - Deisenroth, Faisal, Ong (gratuit)", - }, - { type: "video", label: "Andrej Karpathy - Neural Networks: Zero to Hero" }, - { - type: "site", - label: "Stanford CS229 - Machine Learning (notes + exercices)", - }, - { type: "video", label: "3Blue1Brown - Neural Networks" }, - { - type: "exercice", - label: "d2l.ai - Dive into Deep Learning (code interactif)", - }, - { - type: "site", - label: "The Matrix Calculus You Need For Deep Learning (Parr & Howard)", - }, - ], - progression_gate: [ - "Deriver la backpropagation a la main pour un reseau 2 couches", - "Implementer PCA, ridge, logistic regression from scratch (NumPy only)", - "Expliquer le lien KL divergence -> cross-entropy loss -> softmax", - "Resoudre un probleme d'optimisation sous contrainte (Lagrangien + KKT)", - "Whiteboard complet d'un forward/backward pass avec maths", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Prepa ML - Geometrie & 3D */ -/* ------------------------------------------------------------------ */ - -const GEOMETRY_3D_DATA: EnrichedTab = { - overview: - "Geometrie projective, transformations 3D et modeles de cameras - les mathematiques qui sous-tendent la vision par ordinateur et la reconstruction 3D.", - prerequisites: [ - "Algebre lineaire complete (SVD, PCA)", - "Analyse et optimisation (gradient, hessienne)", - ], - estimated_weeks: 8, - daily_minutes: 120, - blocks: [ - { - stage: "Bloc 1", - focus: "Rotations et groupes SO(3)/SE(3)", - target: - "Maitriser les representations de rotations 3D (matrices, angles d'Euler, quaternions) et le groupe des deplacements rigides.", - checkpoint: - "Composer des rotations avec matrices et quaternions, convertir entre representations.", - concepts: [ - "Groupe SO(3)", - "Groupe SE(3)", - "Quaternions", - "Angles d'Euler", - "Matrice de rotation", - 
"Gimbal lock", - ], - exercises: [ - "Prouver que SO(3) est un groupe (fermeture, associativite, element neutre, inverse).", - "Implementer la conversion quaternion <-> matrice de rotation en Python.", - "Demontrer le probleme du gimbal lock avec les angles d'Euler.", - "Composer 3 rotations successives avec des quaternions et verifier le resultat.", - ], - mastery_checks: [ - "Expliquer pourquoi les quaternions evitent le gimbal lock", - "Decrire la topologie de SO(3)", - ], - }, - { - stage: "Bloc 2", - focus: "Coordonnees homogenes et transformations projectives", - target: - "Comprendre l'espace projectif, les coordonnees homogenes et les transformations projectives pour la geometrie multi-vues.", - checkpoint: - "Appliquer des transformations projectives a des points 2D et 3D, calculer des homographies.", - concepts: [ - "Coordonnees homogenes", - "Espace projectif", - "Homographie", - "Transformation affine", - "Transformation projective", - ], - exercises: [ - "Calculer l'homographie entre deux vues d'un plan a partir de 4 correspondances.", - "Implementer une transformation projective 2D en coordonnees homogenes.", - "Demontrer que les droites paralleles se coupent au point a l'infini en coordonnees homogenes.", - ], - mastery_checks: [ - "Expliquer la difference entre transformation affine et projective", - "Decrire le plan projectif", - ], - }, - { - stage: "Bloc 3", - focus: "Geometrie epipolaire et vision multi-vues", - target: - "Maitriser la geometrie epipolaire, la matrice fondamentale/essentielle et la triangulation pour la reconstruction 3D.", - checkpoint: - "Calculer F et E a partir de correspondances, trianguler des points 3D.", - concepts: [ - "Geometrie epipolaire", - "Matrice fondamentale", - "Matrice essentielle", - "Triangulation", - "Structure from Motion", - ], - exercises: [ - "Deriver la contrainte epipolaire x'Fx = 0 a partir du modele de camera.", - "Implementer l'algorithme des 8 points pour estimer la matrice fondamentale.", - 
"Trianguler un point 3D a partir de deux vues avec la methode DLT.", - "Decomposer la matrice essentielle en rotation et translation.", - ], - mastery_checks: [ - "Expliquer la relation entre F et E", - "Decrire le pipeline SfM complet", - ], - }, - { - stage: "Bloc 4", - focus: "Modeles de cameras et calibration", - target: - "Comprendre le modele pin-hole, les parametres intrinseques/extrinseques et la calibration de cameras.", - checkpoint: - "Calibrer une camera avec un damier, projeter des points 3D en 2D.", - concepts: [ - "Modele pin-hole", - "Parametres intrinseques", - "Parametres extrinseques", - "Distorsion radiale", - "Calibration de Zhang", - ], - exercises: [ - "Deriver la matrice de projection 3x4 du modele pin-hole.", - "Implementer la calibration de Zhang avec OpenCV et analyser les residus.", - "Corriger la distorsion radiale sur une image reelle.", - ], - mastery_checks: [ - "Expliquer chaque parametre de la matrice intrinseque K", - "Decrire les sources d'erreur en calibration", - ], - }, - ], - resources: [ - { - type: "book", - label: "Multiple View Geometry in Computer Vision - Hartley & Zisserman", - }, - { - type: "site", - label: "Stanford CS231A - Computer Vision: 3D Reconstruction", - }, - { - type: "book", - label: "Quaternions and Rotation Sequences - Jack B. 
Kuipers", - }, - ], - progression_gate: [ - "Composer des rotations avec matrices et quaternions", - "Calculer une homographie et l'appliquer a une image", - "Estimer la matrice fondamentale et trianguler des points 3D", - "Calibrer une camera et projeter des points 3D en 2D", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data: Prepa ML - Dynamique & Physique */ -/* ------------------------------------------------------------------ */ - -const DYNAMICS_PHYSICS_DATA: EnrichedTab = { - overview: - "EDO/EDP, mecanique lagrangienne, modeles a base d'energie et integration numerique - les fondements mathematiques de la simulation physique et des reseaux informes par la physique.", - prerequisites: [ - "Analyse et optimisation (gradient descent, convexite)", - "Algebre lineaire complete (SVD, PCA)", - ], - estimated_weeks: 8, - daily_minutes: 120, - blocks: [ - { - stage: "Bloc 1", - focus: "EDO et systemes dynamiques", - target: - "Maitriser la resolution d'equations differentielles ordinaires et l'analyse qualitative de systemes dynamiques.", - checkpoint: - "Resoudre des EDO lineaires et non-lineaires, analyser la stabilite des points fixes.", - concepts: [ - "EDO lineaires", - "Systemes dynamiques", - "Points fixes", - "Stabilite", - "Portrait de phase", - "Theoreme de Cauchy-Lipschitz", - ], - exercises: [ - "Resoudre analytiquement un systeme lineaire 2x2 et tracer le portrait de phase.", - "Implementer Euler explicite et RK4 pour un oscillateur harmonique, comparer la conservation d'energie.", - "Analyser la stabilite d'un pendule simple (points fixes, linearisation, diagramme de bifurcation).", - "Demontrer la convergence de la methode RK4 sur un exemple.", - ], - mastery_checks: [ - "Classifier les points fixes d'un systeme 2D", - "Expliquer pourquoi Euler explicite ne conserve pas l'energie", - ], - }, - { - stage: "Bloc 2", - focus: "Mecanique lagrangienne et hamiltonienne", - target: - "Formuler des problemes de 
mecanique avec les equations de Lagrange et Hamilton, comprendre la conservation de l'energie.", - checkpoint: - "Deriver les equations du mouvement d'un systeme mecanique via le Lagrangien.", - concepts: [ - "Lagrangien", - "Equations d'Euler-Lagrange", - "Hamiltonien", - "Coordonnees generalisees", - "Principe de moindre action", - "Theoreme de Noether", - ], - exercises: [ - "Deriver les equations du mouvement d'un double pendule via le Lagrangien.", - "Montrer l'equivalence entre formulations lagrangienne et newtonienne pour un systeme simple.", - "Implementer un integrateur symplectique (Stormer-Verlet) et verifier la conservation de l'energie.", - "Appliquer le theoreme de Noether pour trouver les quantites conservees d'un systeme.", - ], - mastery_checks: [ - "Expliquer l'avantage des coordonnees generalisees", - "Decrire pourquoi les integrateurs symplectiques conservent l'energie", - ], - }, - { - stage: "Bloc 3", - focus: "Modeles a base d'energie et reseaux informes par la physique", - target: - "Comprendre les Energy-Based Models (EBM) et les Physics-Informed Neural Networks (PINNs) qui integrent des contraintes physiques dans l'apprentissage.", - checkpoint: - "Implementer un PINN simple et un EBM, expliquer la connexion avec la physique.", - concepts: [ - "Energy-Based Models", - "Physics-Informed Neural Networks", - "Neural ODE", - "Conservation laws", - "Soft constraints", - ], - exercises: [ - "Implementer un PINN pour resoudre l'equation de la chaleur 1D.", - "Entrainer un Neural ODE sur des trajectoires de pendule et evaluer la generalisation.", - "Comparer un PINN avec une resolution numerique classique (precision, cout, generalisation).", - "Implementer un Hamiltonian Neural Network qui conserve l'energie par construction.", - ], - mastery_checks: [ - "Expliquer comment un PINN integre les EDP dans la loss function", - "Decrire la difference entre hard et soft physics constraints", - ], - }, - { - stage: "Bloc 4", - focus: "EDP et simulation 
numerique", - target: - "Introduction aux equations aux derivees partielles et methodes de simulation numerique pour la physique computationnelle.", - checkpoint: - "Discretiser et resoudre une EDP simple, comprendre les conditions de stabilite.", - concepts: [ - "Equation de la chaleur", - "Equation des ondes", - "Differences finies", - "Condition CFL", - "Elements finis (intro)", - "Monte Carlo", - ], - exercises: [ - "Discretiser l'equation de la chaleur 2D avec les differences finies et implementer en Python.", - "Verifier la condition CFL sur l'equation des ondes et montrer l'instabilite en cas de violation.", - "Implementer une simulation Monte Carlo simple (diffusion de particules).", - "Comparer differences finies et PINN sur un probleme de Poisson 2D.", - ], - mastery_checks: [ - "Expliquer la condition CFL intuitivement", - "Decrire quand utiliser Monte Carlo vs differences finies", - ], - }, - ], - resources: [ - { - type: "book", - label: "Nonlinear Dynamics and Chaos - Steven Strogatz", - }, - { - type: "book", - label: - "Physics-Based Deep Learning (PBDL Book) - Thuerey et al. 
(gratuit en ligne)", - }, - { - type: "video", - label: "MIT 18.03 - Differential Equations (OCW)", - }, - ], - progression_gate: [ - "Resoudre et analyser un systeme dynamique 2D (points fixes, stabilite, portrait de phase)", - "Deriver les equations du mouvement via le Lagrangien", - "Implementer un PINN et comparer a une resolution classique", - "Discretiser et resoudre une EDP avec les differences finies", - ], -}; - -/* ------------------------------------------------------------------ */ -/* Data lookup */ -/* ------------------------------------------------------------------ */ - -const TAB_DATA: Partial> = { - diagnostic: DIAGNOSTIC_DATA, - college: COLLEGE_DATA, - lycee: LYCEE_DATA, - terminale: TERMINALE_DATA, - linear_algebra: LINEAR_ALGEBRA_DATA, - analysis: ANALYSIS_DATA, - probability: PROBABILITY_DATA, - applied_ml: APPLIED_ML_DATA, - geometry_3d: GEOMETRY_3D_DATA, - dynamics_physics: DYNAMICS_PHYSICS_DATA, -}; - -/* ------------------------------------------------------------------ */ -/* Shared components */ -/* ------------------------------------------------------------------ */ - -function StatCard({ - value, - label, - note, - accentColor, -}: { - value: string; - label: string; - note: string; - accentColor?: string; -}) { - return ( -
-

- {value} -

-

{label}

-

{note}

-
- ); -} - -function SectionBox({ - title, - accent, - accentColor, - children, -}: { - title: string; - accent?: "blue" | "neutral" | "yellow" | "purple"; - accentColor?: string; - children: React.ReactNode; -}) { - const border_color = - accent === "neutral" - ? "border-white/[0.06]" - : accent === "yellow" - ? "border-white/[0.06]" - : accent === "purple" - ? "border-[rgba(94,106,210,0.2)]" - : "border-white/[0.08]"; - - return ( -
-

- {title} -

- {children} -
- ); -} - -function BulletList({ items }: { items: string[] }) { - return ( -
    - {items.map((item) => ( -
  • {item}
  • - ))} -
- ); -} - -function CheckList({ items, label }: { items: string[]; label?: string }) { - return ( -
- {label ? ( -

{label}

- ) : null} -
    - {items.map((item) => ( -
  • - - {item} -
  • - ))} -
-
- ); -} - -function ResourceBadge({ type }: { type: Resource["type"] }) { - const styles: Record< - Resource["type"], - { label: string; color?: string; className?: string } - > = { - book: { label: "Livre", color: "#5e6ad2" }, - video: { label: "Video", color: "#f472b6" }, - site: { label: "Site", className: "text-white/70 bg-white/[0.04]" }, - exercice: { - label: "Exercices", - className: "text-white/70 bg-white/[0.04]", - }, - }; - const s = styles[type]; - return ( - - {s.label} - - ); -} - -/* ------------------------------------------------------------------ */ -/* Enriched topic block */ -/* ------------------------------------------------------------------ */ - -function TopicCard({ - block, - accentColor, -}: { - block: TopicBlock; - accentColor?: string; -}) { - return ( -
-
-

- {block.stage} -

-

- {block.focus} -

-

{block.target}

-
- - {block.concepts.length > 0 && ( -
-

Concepts cles

-
    - {block.concepts.map((c) => ( -
  • {c}
  • - ))} -
-
- )} - - {block.exercises.length > 0 && ( -
-

- Exercices pratiques -

-
    - {block.exercises.map((e) => ( -
  • {e}
  • - ))} -
-
- )} - - {block.mastery_checks.length > 0 && ( -
-

Je maitrise si...

-
    - {block.mastery_checks.map((m) => ( -
  • - - {m} -
  • - ))} -
-
- )} - -
-

- Checkpoint : {block.checkpoint} -

-
-
- ); -} - -/* ------------------------------------------------------------------ */ -/* Enriched tab content renderer */ -/* ------------------------------------------------------------------ */ - -function EnrichedTabContent({ - data, - accentColor, -}: { - data: EnrichedTab; - accentColor?: string; -}) { - return ( -
- {/* Stats row */} -
- {data.estimated_weeks != null && ( - - )} - - sum + b.mastery_checks.length, 0)}`} - label="Points de maitrise" - note="Checklists pour evaluer votre progression." - accentColor={accentColor} - /> -
- - {/* Overview */} - -

{data.overview}

-
- - {/* Prerequisites */} - {data.prerequisites && data.prerequisites.length > 0 && ( - - - - )} - - {/* Topic blocks */} -
-

- Plan detaille par bloc -

-
- {data.blocks.map((block) => ( - - ))} -
-
- - {/* Resources */} - {data.resources.length > 0 && ( - -
    - {data.resources.map((r) => ( -
  • - - {r.label} -
  • - ))} -
-
- )} - - {/* Progression gate */} - {data.progression_gate.length > 0 && ( - - - - )} -
- ); -} - -/* ------------------------------------------------------------------ */ -/* Methode content (shared between tracks) */ -/* ------------------------------------------------------------------ */ - -const DAILY_SESSION_TEMPLATE = [ - "10 min - echauffement calcul mental ou rappels rapides.", - "5 min - relire le carnet d'erreurs de la veille.", - "30 min - lecon ciblee : lire la theorie puis resoudre des exercices guides.", - "15 min - drill chronometre (quiz ou serie d'exercices sans aide).", - "10 min - corriger, noter les erreurs dans le carnet, planifier le lendemain.", -]; - -const WEEKLY_REVIEW_STEPS = [ - "Lundi-Vendredi : sessions quotidiennes sur le bloc en cours.", - "Samedi : quiz cumulatif de 30 questions (bloc actuel + blocs precedents).", - "Dimanche : revue du carnet d'erreurs, retest des points faibles, planification semaine suivante.", -]; - -const SPACED_REPETITION_RULES = [ - "Chaque erreur est notee dans le carnet avec la date.", - "Premiere revision : 1 jour apres l'erreur.", - "Deuxieme revision : 3 jours apres.", - "Troisieme revision : 7 jours apres.", - "Si l'erreur revient, elle repart au jour 1.", - "Quand un concept est reussi 3 fois de suite, il sort du carnet.", -]; - -const MASTERY_GATE_RULES = [ - "Ne pas avancer tant qu'un sujet n'a pas atteint 80% deux fois consecutives.", - "Tout theme tombant sous 70% au test cumulatif repasse en priorite immediate.", - "Suivre a la fois la precision ET le temps - eviter une comprehension lente et fragile.", - "Un bloc est valide quand TOUS les 'Je maitrise si...' 
sont coches.", - "Le test de checkpoint (en conditions d'examen) confirme la maitrise reelle.", -]; - -const TESTING_CADENCE = [ - "Diagnostic de positionnement au demarrage de chaque track.", - "Quiz hebdomadaire chronometre de 25 a 30 questions (samedi).", - "Test cumulatif bimensuel couvrant tous les blocs precedents.", - "Chaque erreur est consignee et retestee selon le calendrier de repetition espacee.", - "Test de checkpoint en conditions d'examen a la fin de chaque bloc.", -]; - -const ERROR_JOURNAL_FORMAT = [ - "Date | Sujet | Erreur commise | Cause (calcul / concept / inattention) | Correction", - "Exemple : 15/03 | Fractions | 1/3 + 1/4 = 2/7 | Concept (denominateur commun) | 4/12 + 3/12 = 7/12", - "Relire le journal avant chaque session (5 min).", - "Marquer 'OK x3' quand l'erreur est resolue 3 fois de suite.", -]; - -function MethodeContent({ - track, - accentColor, -}: { - track: Track; - accentColor?: string; -}) { - const is_zero = track === "zero_to_one"; - return ( -
-
- - - -
- - -

- Chaque onglet represente un bloc sequentiel. Vous ne passez au suivant - que lorsque tous les criteres de maitrise sont valides. Il n'y a pas - de raccourci : un concept mal maitrise au niveau N cree des lacunes au - niveau N+1. -

-

- La progression est :{" "} - {is_zero - ? "Diagnostic -> College -> Lycee -> Terminale S" - : "Algebre Lineaire -> Analyse -> Probabilites -> Maths Appliquees ML"} - . Chaque etape a des prerequis explicites et un test de passage. -

-
- - - - - - - - - - - -
-

Format du carnet

-
    - {ERROR_JOURNAL_FORMAT.map((line) => ( -
  • - {line} -
  • - ))} -
-
-
- - - - - - - - -
- ); -} - -/* ------------------------------------------------------------------ */ -/* Evaluation content */ -/* ------------------------------------------------------------------ */ - -const ZERO_TO_ONE_EVAL_SECTIONS: { - title: string; - tab: ZeroToOneTab; - items: string[]; -}[] = [ - { - title: "Diagnostic & Arithmetique", - tab: "diagnostic", - items: [ - "Calcul mental fluide : 20 operations en 2 minutes", - "Fractions : addition, soustraction, multiplication, division sans erreur", - "Pourcentages et conversions fraction/decimal instantanes", - "Traduire un enonce texte en equation", - "Resoudre ax + b = c en moins de 30 secondes", - "Lire un graphique et determiner la pente", - ], - }, - { - title: "College (6e-3e)", - tab: "college", - items: [ - "Identites remarquables connues par coeur et appliquees", - "Theoreme de Pythagore : direct et reciproque", - "Theoreme de Thales avec redaction complete", - "Systeme 2x2 resolu par substitution et combinaison", - "Fonctions lineaires et affines : equation et graphe", - "Probabilites simples et statistiques descriptives", - "Trigonometrie dans le triangle rectangle (sin/cos/tan)", - ], - }, - { - title: "Seconde-Premiere S", - tab: "lycee", - items: [ - "Fonctions de reference tracees de memoire avec variations", - "Derivation fluide (toutes regles : somme, produit, quotient, composee)", - "Etude complete d'une fonction polynome de degre 3", - "Suites arithmetiques et geometriques : terme general et somme", - "Trigonometrie : valeurs remarquables et equations cos(x) = k, sin(x) = k", - "Produit scalaire : calcul et applications geometriques", - "Preuve par recurrence simple", - ], - }, - { - title: "Terminale S", - tab: "terminale", - items: [ - "Limites et formes indeterminees levees", - "Integration : primitives, integration par parties, aires", - "Exponentielle et logarithme : derivees, limites, croissances comparees", - "Nombres complexes : forme algebrique et trigonometrique", - "Loi normale et intervalle de 
confiance", - "Epreuve type bac complete en 4h a 14/20+", - ], - }, -]; - -const PREPA_ML_EVAL_SECTIONS: { - title: string; - tab: PrepaMlTab; - items: string[]; -}[] = [ - { - title: "Algebre Lineaire", - tab: "linear_algebra", - items: [ - "Base et dimension d'un sous-espace vectoriel", - "Diagonalisation d'une matrice 3x3 et calcul de A^n", - "Theoreme du rang applique correctement", - "SVD : decomposition et interpretation geometrique", - "PCA implementee from scratch en NumPy", - "Moindres carres : formulation et solution", - ], - }, - { - title: "Analyse", - tab: "analysis", - items: [ - "Convergence de series classiques sans notes", - "Gradient et hessienne de fonctions a plusieurs variables", - "Optimisation par multiplicateurs de Lagrange", - "Gradient descent implemente et convergence demontree (cas convexe)", - "Integrales multiples avec changement de variables", - "Developpements en serie entiere de memoire", - ], - }, - { - title: "Probabilites & Stats", - tab: "probability", - items: [ - "Formule de Bayes appliquee a des problemes multi-etapes", - "Lois classiques identifiees dans un enonce et E[X], Var(X) calcules", - "TCL enonce et applique pour construire un IC", - "MLE derive pour toute famille parametrique classique", - "Test d'hypothese construit avec region de rejet et p-value", - "Convergence en probabilite vs presque sure distinguees", - ], - }, - { - title: "Maths Appliquees ML", - tab: "applied_ml", - items: [ - "KL divergence -> cross-entropy loss derive et explique", - "Adam implemente from scratch avec correction de biais", - "Ridge regression : solution fermee derivee et interpretee", - "Backpropagation derivee a la main pour un reseau 2 couches", - "Forward/backward pass dessine au tableau avec maths a chaque etape", - "Lien theoreme -> code pour chaque composant d'un reseau", - ], - }, -]; - -function EvaluationContent({ - track, - accentColor, -}: { - track: Track; - accentColor?: string; -}) { - const sections = - track === 
"zero_to_one" - ? ZERO_TO_ONE_EVAL_SECTIONS - : PREPA_ML_EVAL_SECTIONS; - - const total_checks = sections.reduce((sum, s) => sum + s.items.length, 0); - const track_label = - track === "zero_to_one" ? "Zero to One" : "Classe Prepa ML"; - - return ( -
-
- - - -
- - -

- Parcourez chaque section et evaluez honnetement votre niveau. Une - competence est acquise si vous pouvez la demontrer sans notes et sans - aide, en conditions d'examen. Cochez chaque item quand vous l'avez - valide au moins 2 fois. -

-
- - {sections.map((section) => ( - - - - ))} - - - - -
- ); -} - -/* ------------------------------------------------------------------ */ -/* Content lookup */ -/* ------------------------------------------------------------------ */ - -function render_tab_content( - track: Track, - tab: TabKey, - accentColor?: string, -): React.JSX.Element { - if (tab === "methode") - return ; - if (tab === "pratique") return ; - if (tab === "evaluation") - return ; - const data = TAB_DATA[tab]; - if (!data) - return

Content not found.

; - return ; -} - -/* ------------------------------------------------------------------ */ -/* Root view */ -/* ------------------------------------------------------------------ */ - -export function MathRefreshView() { - const search = useSearch({ strict: false }) as { - track?: string; - tab?: string; - }; - - const active_track: Track = - search.track === "prepa_ml" ? "prepa_ml" : "zero_to_one"; - - const active_tab: TabKey = - search.tab && is_valid_tab(active_track, search.tab) - ? (search.tab as TabKey) - : default_tab(active_track); - - const track_meta = TRACKS.find((t) => t.key === active_track)!; - const active_tab_meta = tabs_for_track(active_track).find( - (tab) => tab.key === active_tab, - ); - const active_title = active_tab_meta?.label ?? "Maths"; - - useDocumentTitle(`Maths - ${active_title}`); - - return ( - - - -
- {render_tab_content(active_track, active_tab, active_tab_meta?.color)} -
-
- ); -} diff --git a/frontend/src/views/MightyGodModeView.tsx b/frontend/src/views/MightyGodModeView.tsx index 6d7d0a4..6f389c7 100644 --- a/frontend/src/views/MightyGodModeView.tsx +++ b/frontend/src/views/MightyGodModeView.tsx @@ -294,7 +294,6 @@ const SURFACE_GROUPS: SurfaceGroup[] = [ { path: "/cognitive-toolkit", label: "Cognitive Toolkit" }, { path: "/behavioral-design", label: "Behavioral Design" }, { path: "/elite-freelance", label: "Elite Freelance" }, - { path: "/math-refresh", label: "Math Refresh" }, { path: "/culture-generale", label: "Culture Generale" }, { path: "/chinese", label: "Chinese" }, { path: "/cantonese", label: "Cantonese" }, diff --git a/frontend/src/views/ReferenceView.tsx b/frontend/src/views/ReferenceView.tsx index b078451..1582422 100644 --- a/frontend/src/views/ReferenceView.tsx +++ b/frontend/src/views/ReferenceView.tsx @@ -12,9 +12,6 @@ const AppliedSystemsView = lazy(() => default: m.AppliedSystemsView, })), ); -const MathRefreshView = lazy(() => - import("./MathRefreshView").then((m) => ({ default: m.MathRefreshView })), -); const EliteFreelanceView = lazy(() => import("./EliteFreelanceView").then((m) => ({ default: m.EliteFreelanceView, @@ -84,7 +81,6 @@ const SECTION_MAP: Record< prep: PrepView, "dev-ref": DevRefView, "applied-systems": AppliedSystemsView, - "math-refresh": MathRefreshView, "elite-freelance": EliteFreelanceView, "ai-engineering": AIEngineeringView, "frontend-eng": FrontendEngView, diff --git a/frontend/src/views/culture-generale/LessonPanel.tsx b/frontend/src/views/culture-generale/LessonPanel.tsx index d959efb..46c15c7 100644 --- a/frontend/src/views/culture-generale/LessonPanel.tsx +++ b/frontend/src/views/culture-generale/LessonPanel.tsx @@ -1,6 +1,6 @@ import { BookOpen, ExternalLink } from "lucide-react"; -import { MathText } from "../../components/math-exercise/MathRenderer"; +import { MathText } from "../../components/MathRenderer"; import type { BlockLesson } from "./types"; /* 
------------------------------------------------------------------ */ diff --git a/frontend/src/views/culture-generale/QuizEngine.tsx b/frontend/src/views/culture-generale/QuizEngine.tsx index c2b2e6a..1cd9db9 100644 --- a/frontend/src/views/culture-generale/QuizEngine.tsx +++ b/frontend/src/views/culture-generale/QuizEngine.tsx @@ -2,7 +2,7 @@ import { useCallback, useEffect, useRef, useState } from "react"; import { motion, AnimatePresence } from "motion/react"; import { Check, X, Clock } from "lucide-react"; -import { MathText } from "../../components/math-exercise/MathRenderer"; +import { MathText } from "../../components/MathRenderer"; import type { QcmQuestion } from "./types"; /* ------------------------------------------------------------------ */ diff --git a/frontend/src/views/math-bridge-checks.ts b/frontend/src/views/math-bridge-checks.ts deleted file mode 100644 index ce8acab..0000000 --- a/frontend/src/views/math-bridge-checks.ts +++ /dev/null @@ -1,220 +0,0 @@ -// ============================================================================ -// Math Bridge — Micro-Check Questions (3 per MVP skill) -// ============================================================================ - -export interface CheckQuestion { - question: string; - choices: string[]; - correctIndex: number; -} - -export interface SkillCheck { - skillSlug: string; - questions: [CheckQuestion, CheckQuestion, CheckQuestion]; -} - -export const SKILL_CHECKS: SkillCheck[] = [ - // ── Arithmetic ────────────────────────────────────────────────────────── - { - skillSlug: "arithmetic", - questions: [ - { - question: "What is 7 × 8 − 12 ÷ 4?", - choices: ["53", "56", "50", "59"], - correctIndex: 0, - }, - { - question: "What is (−3) × (−5) + (−2)?", - choices: ["17", "13", "−17", "−13"], - correctIndex: 1, - }, - { - question: "Round 4,736 to the nearest hundred.", - choices: ["4,700", "4,740", "4,800", "5,000"], - correctIndex: 0, - }, - ], - }, - - // ── Algebra 
───────────────────────────────────────────────────────────── - { - skillSlug: "algebra", - questions: [ - { - question: "Solve for x: 3x + 7 = 22", - choices: ["x = 5", "x = 7", "x = 4", "x = 6"], - correctIndex: 0, - }, - { - question: "Factor: x² − 9", - choices: ["(x − 3)(x + 3)", "(x − 9)(x + 1)", "(x − 3)²", "x(x − 9)"], - correctIndex: 0, - }, - { - question: "If 2x − 5 > 3, what is x?", - choices: ["x > 4", "x > 1", "x < 4", "x > 8"], - correctIndex: 0, - }, - ], - }, - - // ── Functions ─────────────────────────────────────────────────────────── - { - skillSlug: "functions", - questions: [ - { - question: "If f(x) = 2x + 1, what is f(3)?", - choices: ["7", "6", "9", "5"], - correctIndex: 0, - }, - { - question: "What is the inverse of f(x) = 3x − 6?", - choices: [ - "f⁻¹(x) = (x + 6) / 3", - "f⁻¹(x) = 3x + 6", - "f⁻¹(x) = x / 3 − 6", - "f⁻¹(x) = (x − 6) / 3", - ], - correctIndex: 0, - }, - { - question: "Which function grows fastest as x → ∞?", - choices: ["2ˣ", "x²", "100x", "x³"], - correctIndex: 0, - }, - ], - }, - - // ── Probability Basics ────────────────────────────────────────────────── - { - skillSlug: "probability-basics", - questions: [ - { - question: "A fair die is rolled. What is P(even number)?", - choices: ["1/2", "1/3", "1/6", "2/3"], - correctIndex: 0, - }, - { - question: "Two coins are flipped. 
What is P(at least one head)?", - choices: ["3/4", "1/2", "1/4", "2/3"], - correctIndex: 0, - }, - { - question: - "If P(A) = 0.3 and A and B are independent with P(B) = 0.5, what is P(A and B)?", - choices: ["0.15", "0.80", "0.35", "0.50"], - correctIndex: 0, - }, - ], - }, - - // ── Vectors Intro ─────────────────────────────────────────────────────── - { - skillSlug: "vectors-intro", - questions: [ - { - question: "What is the magnitude of vector (3, 4)?", - choices: ["5", "7", "√7", "12"], - correctIndex: 0, - }, - { - question: "What is the dot product of (1, 2) and (3, −1)?", - choices: ["1", "5", "−1", "7"], - correctIndex: 0, - }, - { - question: "A unit vector has magnitude:", - choices: ["1", "0", "It depends on direction", "√2"], - correctIndex: 0, - }, - ], - }, - - // ── Derivatives ───────────────────────────────────────────────────────── - { - skillSlug: "derivatives", - questions: [ - { - question: "What is d/dx of x³?", - choices: ["3x²", "x²", "3x³", "x⁴/4"], - correctIndex: 0, - }, - { - question: "If f(x) = sin(x), what is f'(x)?", - choices: ["cos(x)", "−sin(x)", "−cos(x)", "tan(x)"], - correctIndex: 0, - }, - { - question: "Using the chain rule, what is d/dx of (2x + 1)³?", - choices: ["6(2x + 1)²", "3(2x + 1)²", "(2x + 1)²", "6x(2x + 1)²"], - correctIndex: 0, - }, - ], - }, - - // ── Conditional Probability ───────────────────────────────────────────── - { - skillSlug: "conditional-probability", - questions: [ - { - question: "If P(A) = 0.6 and P(B|A) = 0.5, what is P(A and B)?", - choices: ["0.30", "0.55", "0.10", "1.10"], - correctIndex: 0, - }, - { - question: "Bayes' theorem states P(A|B) = ?", - choices: [ - "P(B|A) × P(A) / P(B)", - "P(A) × P(B)", - "P(A) / P(B)", - "P(B|A) + P(A)", - ], - correctIndex: 0, - }, - { - question: - "A test has 95% sensitivity and 10% false positive rate. 
If 1% of the population is affected, what is roughly P(affected | positive)?", - choices: ["About 9%", "About 95%", "About 50%", "About 1%"], - correctIndex: 0, - }, - ], - }, - - // ── Linear Algebra ────────────────────────────────────────────────────── - { - skillSlug: "linear-algebra", - questions: [ - { - question: "What is the determinant of [[2, 1], [4, 3]]?", - choices: ["2", "6", "−2", "10"], - correctIndex: 0, - }, - { - question: "If Av = λv, then v is called a(n):", - choices: ["Eigenvector", "Basis vector", "Unit vector", "Null vector"], - correctIndex: 0, - }, - { - question: "Two vectors are linearly independent if:", - choices: [ - "Neither is a scalar multiple of the other", - "They point in the same direction", - "Their dot product is 1", - "Their sum is zero", - ], - correctIndex: 0, - }, - ], - }, -]; - -export function getCheckForSkill(slug: string): SkillCheck | undefined { - return SKILL_CHECKS.find((c) => c.skillSlug === slug); -} - -export function scoreCheck(answers: number[], check: SkillCheck): number { - const correct = answers.filter( - (a, i) => a === check.questions[i].correctIndex, - ).length; - return Math.round((correct / check.questions.length) * 100); -} diff --git a/frontend/src/views/math-bridge-data.ts b/frontend/src/views/math-bridge-data.ts deleted file mode 100644 index de27919..0000000 --- a/frontend/src/views/math-bridge-data.ts +++ /dev/null @@ -1,890 +0,0 @@ -// ============================================================================ -// Math Bridge Program — Data Model & Static Data -// ============================================================================ - -// === Tab Types === - -export type MathBridgeTab = - | "overview" - | "core_numeracy" - | "high_school" - | "pre_university" - | "engineering_prep"; - -export const TAB_ORDER: MathBridgeTab[] = [ - "overview", - "core_numeracy", - "high_school", - "pre_university", - "engineering_prep", -]; - -export const TAB_META: Record< - MathBridgeTab, - { label: 
string; description: string; color: string } -> = { - overview: { - label: "Overview", - description: - "Your path from school math to university-level foundations - four levels, each building on the last.", - color: "#55cdff", - }, - core_numeracy: { - label: "Core Numeracy", - description: - "The absolute basics - arithmetic, fractions, percentages, and reading graphs. If any of this feels shaky, start here.", - color: "#4ade80", - }, - high_school: { - label: "High School Math", - description: - "Algebra, functions, geometry, trig, and your first taste of probability. The toolkit you need before university-level work.", - color: "#ffc47c", - }, - pre_university: { - label: "Pre-University", - description: - "Vectors, matrices, calculus, and formal probability. The bridge between school math and degree-level thinking.", - color: "#f472b6", - }, - engineering_prep: { - label: "Engineering Prep", - description: - "Linear algebra, multivariable calculus, discrete math, and statistics for ML. 
After this, you're ready for Foundations.", - color: "#5e6ad2", - }, -}; - -// === Core Types === - -export interface BridgeResource { - title: string; - type: "video" | "textbook" | "interactive" | "practice"; - url: string; - free: boolean; -} - -export interface BridgeTopic { - name: string; - slug: string; - whyItMatters: string; - keyConcepts: string[]; - estimatedHours: number; - prerequisite?: string; - bridgesTo?: string[]; - resources: BridgeResource[]; -} - -export interface BridgeLevel { - level: number; - title: string; - subtitle: string; - description: string; - totalHours: number; - topics: BridgeTopic[]; -} - -export type LevelTab = Exclude; - -// === Level Data === - -export const BRIDGE_LEVELS: Record = { - // ── Level 0: Core Numeracy ────────────────────────────────────────────── - core_numeracy: { - level: 0, - title: "Core Numeracy", - subtitle: "The building blocks everything else rests on", - description: - "If you can do these confidently without a calculator, you have a solid base. 
If not, spend a few days here before moving on.", - totalHours: 25, - topics: [ - { - name: "Arithmetic", - slug: "arithmetic", - whyItMatters: - "Every calculation you'll ever do - from debugging a model's output to estimating cloud costs - starts with arithmetic fluency.", - keyConcepts: [ - "Order of operations (BODMAS/PEMDAS)", - "Negative numbers and signed arithmetic", - "Mental math shortcuts", - "Estimation and rounding", - ], - estimatedHours: 3, - resources: [ - { - title: "Khan Academy — Arithmetic", - type: "interactive", - url: "https://www.khanacademy.org/math/arithmetic", - free: true, - }, - { - title: "Brilliant — Everyday Math", - type: "interactive", - url: "https://brilliant.org/courses/everyday-math/", - free: false, - }, - ], - }, - { - name: "Fractions", - slug: "fractions", - whyItMatters: - "Fractions are how you think about proportions, splits, and ratios - essential for understanding probabilities later.", - keyConcepts: [ - "Equivalent fractions and simplification", - "Adding, subtracting, multiplying, dividing fractions", - "Mixed numbers and improper fractions", - "Converting between fractions, decimals, and percentages", - ], - estimatedHours: 3, - prerequisite: "arithmetic", - resources: [ - { - title: "Khan Academy — Fractions", - type: "interactive", - url: "https://www.khanacademy.org/math/arithmetic/fraction-arithmetic", - free: true, - }, - ], - }, - { - name: "Percentages", - slug: "percentages", - whyItMatters: - "Model accuracy, error rates, growth metrics, A/B test results - percentages are the language of measurement.", - keyConcepts: [ - "Percentage of a quantity", - "Percentage increase and decrease", - "Compound percentage changes", - "Percentage points vs relative percentage", - ], - estimatedHours: 3, - prerequisite: "fractions", - resources: [ - { - title: "Khan Academy — Percentages", - type: "interactive", - url: "https://www.khanacademy.org/math/cc-seventh-grade-math/cc-7th-fractions-decimals", - free: true, - }, - 
], - }, - { - name: "Ratios & Proportions", - slug: "ratios", - whyItMatters: - "Scaling, normalization, and unit conversion all depend on ratios. You'll use them constantly in data preprocessing.", - keyConcepts: [ - "Simplifying ratios", - "Direct and inverse proportion", - "Unit rates and conversions", - "Scale factors", - ], - estimatedHours: 3, - prerequisite: "fractions", - resources: [ - { - title: "Khan Academy — Ratios, Rates, and Proportions", - type: "interactive", - url: "https://www.khanacademy.org/math/cc-sixth-grade-math/cc-6th-ratios-prop-topic", - free: true, - }, - ], - }, - { - name: "Exponents & Powers", - slug: "exponents", - whyItMatters: - "Exponential growth, logarithmic scales, scientific notation, and Big-O notation all need you to think in powers.", - keyConcepts: [ - "Laws of exponents (product, quotient, power rules)", - "Negative and fractional exponents", - "Scientific notation", - "Square roots and nth roots", - ], - estimatedHours: 4, - prerequisite: "arithmetic", - resources: [ - { - title: "Khan Academy — Exponents and Radicals", - type: "interactive", - url: "https://www.khanacademy.org/math/algebra/x2f8bb11595b61c86:exponents-radicals", - free: true, - }, - ], - }, - { - name: "Simple Equations", - slug: "simple-equations", - whyItMatters: - "Solving for unknowns is the core of algebra, optimization, and every ML loss function you'll ever encounter.", - keyConcepts: [ - "One-step and two-step equations", - "Balancing both sides", - "Substitution", - "Word problems to equations", - ], - estimatedHours: 4, - prerequisite: "arithmetic", - resources: [ - { - title: "Khan Academy — One-variable equations", - type: "interactive", - url: "https://www.khanacademy.org/math/algebra/x2f8bb11595b61c86:solve-equations-inequalities", - free: true, - }, - ], - }, - { - name: "Graph Reading", - slug: "graph-reading", - whyItMatters: - "Before you plot training curves or read dashboards, you need to be comfortable extracting meaning from 
charts.", - keyConcepts: [ - "Reading bar charts, line graphs, and pie charts", - "Axes, scales, and labels", - "Identifying trends and outliers", - "Coordinate pairs (x, y)", - ], - estimatedHours: 5, - resources: [ - { - title: "Khan Academy — Reading and Interpreting Data", - type: "interactive", - url: "https://www.khanacademy.org/math/statistics-probability/displaying-describing-data", - free: true, - }, - ], - }, - ], - }, - - // ── Level 1: High School Math ─────────────────────────────────────────── - high_school: { - level: 1, - title: "High School Math", - subtitle: "The core toolkit for any technical career", - description: - "This is what a good A-level or Bac S covers. Algebra, functions, geometry, trig, and your first look at probability and stats.", - totalHours: 50, - topics: [ - { - name: "Algebra", - slug: "algebra", - whyItMatters: - "Algebra is the language of every technical field. If you can manipulate expressions and solve equations fluently, everything else gets easier.", - keyConcepts: [ - "Expanding and factoring expressions", - "Quadratic equations (factoring, formula, completing the square)", - "Simultaneous equations", - "Inequalities", - "Polynomials", - ], - estimatedHours: 10, - prerequisite: "simple-equations", - resources: [ - { - title: "Khan Academy — Algebra 1", - type: "interactive", - url: "https://www.khanacademy.org/math/algebra", - free: true, - }, - { - title: "Professor Leonard — Algebra (YouTube)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLDesaqWTN6ESsmwELdrzhcGiRhk5DjwLP", - free: true, - }, - ], - }, - { - name: "Functions", - slug: "functions", - whyItMatters: - "Functions are the single most important concept in math and programming. Every ML model is a function. 
Every API is a function.", - keyConcepts: [ - "Domain, range, and notation", - "Linear, quadratic, and polynomial functions", - "Exponential and logarithmic functions", - "Composition and inverse functions", - "Graphing and transformations", - ], - estimatedHours: 10, - prerequisite: "algebra", - resources: [ - { - title: "Khan Academy — Algebra 2", - type: "interactive", - url: "https://www.khanacademy.org/math/algebra2", - free: true, - }, - { - title: "3Blue1Brown — Essence of Linear Algebra (intro)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab", - free: true, - }, - ], - }, - { - name: "Geometry Basics", - slug: "geometry-basics", - whyItMatters: - "Geometric intuition helps with vector spaces, distance metrics, and visualizing high-dimensional data.", - keyConcepts: [ - "Angles, triangles, and polygons", - "Area and perimeter formulas", - "Pythagorean theorem", - "Coordinate geometry (distance, midpoint, slope)", - "Circles and their equations", - ], - estimatedHours: 8, - resources: [ - { - title: "Khan Academy — Geometry", - type: "interactive", - url: "https://www.khanacademy.org/math/geometry", - free: true, - }, - ], - }, - { - name: "Trigonometry Basics", - slug: "trig-basics", - whyItMatters: - "Trig shows up in signal processing, Fourier transforms, rotations, and positional encodings in transformers.", - keyConcepts: [ - "Sine, cosine, tangent definitions", - "Unit circle", - "Trig identities (basic)", - "Solving right triangles", - "Radians vs degrees", - ], - estimatedHours: 8, - prerequisite: "geometry-basics", - resources: [ - { - title: "Khan Academy — Trigonometry", - type: "interactive", - url: "https://www.khanacademy.org/math/trigonometry", - free: true, - }, - ], - }, - { - name: "Probability Basics", - slug: "probability-basics", - whyItMatters: - "Every ML model makes probabilistic predictions. 
Understanding randomness, events, and likelihood is non-negotiable.", - keyConcepts: [ - "Sample spaces and events", - "Counting principles (multiplication rule)", - "Independent and dependent events", - "Simple probability calculations", - "Expected value (intuitive)", - ], - estimatedHours: 7, - resources: [ - { - title: "Khan Academy — Probability", - type: "interactive", - url: "https://www.khanacademy.org/math/statistics-probability/probability-library", - free: true, - }, - ], - }, - { - name: "Descriptive Statistics", - slug: "descriptive-stats", - whyItMatters: - "Before you can model data, you need to describe it. Mean, median, spread - these are your first tools for understanding any dataset.", - keyConcepts: [ - "Mean, median, mode", - "Range, variance, standard deviation", - "Histograms and box plots", - "Quartiles and percentiles", - "Normal distribution (shape intuition)", - ], - estimatedHours: 7, - prerequisite: "probability-basics", - resources: [ - { - title: "Khan Academy — Summarizing Quantitative Data", - type: "interactive", - url: "https://www.khanacademy.org/math/statistics-probability/summarizing-quantitative-data", - free: true, - }, - { - title: "StatQuest — Statistics Fundamentals (YouTube)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9", - free: true, - }, - ], - }, - ], - }, - - // ── Level 2: Pre-University Math ──────────────────────────────────────── - pre_university: { - level: 2, - title: "Pre-University Math", - subtitle: "The bridge between school and degree-level thinking", - description: - "Vectors, matrices, calculus, and formal probability. This is where math stops being about answers and starts being about structures.", - totalHours: 70, - topics: [ - { - name: "Vectors", - slug: "vectors-intro", - whyItMatters: - "Word embeddings, feature vectors, gradients - vectors are the data structure of ML. 
If you can think in vectors, you can think in ML.", - keyConcepts: [ - "Vectors as direction + magnitude", - "Vector addition and scalar multiplication", - "Dot product and angle between vectors", - "Unit vectors and normalization", - "2D and 3D vector operations", - ], - estimatedHours: 8, - prerequisite: "geometry-basics", - resources: [ - { - title: "3Blue1Brown — Essence of Linear Algebra (Ch 1-3)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab", - free: true, - }, - { - title: "Khan Academy — Vectors", - type: "interactive", - url: "https://www.khanacademy.org/math/linear-algebra/vectors-and-spaces", - free: true, - }, - ], - }, - { - name: "Matrices", - slug: "matrices-intro", - whyItMatters: - "Neural networks are matrix multiplications. Transformers are attention matrices. You'll work with matrices every single day.", - keyConcepts: [ - "Matrix notation and dimensions", - "Matrix addition and scalar multiplication", - "Matrix multiplication (row-by-column)", - "Identity and zero matrices", - "Transpose", - ], - estimatedHours: 8, - prerequisite: "vectors-intro", - resources: [ - { - title: "3Blue1Brown — Linear transformations and matrices", - type: "video", - url: "https://www.youtube.com/watch?v=kYB8IZa5AuE", - free: true, - }, - { - title: "Khan Academy — Matrix transformations", - type: "interactive", - url: "https://www.khanacademy.org/math/linear-algebra/matrix-transformations", - free: true, - }, - ], - }, - { - name: "Limits", - slug: "limits", - whyItMatters: - "Limits are the foundation of calculus. 
Without them, derivatives and integrals are just handwaving.", - keyConcepts: [ - "Intuitive idea of a limit", - "Left-hand and right-hand limits", - "Limit laws and techniques", - "Limits at infinity", - "Continuity", - ], - estimatedHours: 8, - prerequisite: "functions", - resources: [ - { - title: "3Blue1Brown — Essence of Calculus (Ch 1-2)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr", - free: true, - }, - { - title: "Khan Academy — Limits and Continuity", - type: "interactive", - url: "https://www.khanacademy.org/math/ap-calculus-ab/ab-limits-new", - free: true, - }, - ], - }, - { - name: "Derivatives", - slug: "derivatives", - whyItMatters: - "Every ML model learns by computing derivatives. Backpropagation is just the chain rule applied millions of times.", - keyConcepts: [ - "Derivative as rate of change", - "Power rule, product rule, quotient rule", - "Chain rule", - "Derivatives of common functions (sin, cos, exp, ln)", - "Higher-order derivatives", - ], - estimatedHours: 12, - prerequisite: "limits", - resources: [ - { - title: "3Blue1Brown — Essence of Calculus (Ch 3-6)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLZHQObOWTQDMsr9K-rj53DwVRMYO3t5Yr", - free: true, - }, - { - title: "Khan Academy — Differentiation", - type: "interactive", - url: "https://www.khanacademy.org/math/ap-calculus-ab/ab-differentiation-1-new", - free: true, - }, - { - title: "Paul's Online Math Notes — Derivatives", - type: "textbook", - url: "https://tutorial.math.lamar.edu/Classes/CalcI/DerivativeIntro.aspx", - free: true, - }, - ], - }, - { - name: "Integrals", - slug: "integrals", - whyItMatters: - "Integration computes areas, volumes, and cumulative distributions. 
You'll need it for probability density functions and loss surfaces.", - keyConcepts: [ - "Definite and indefinite integrals", - "Fundamental theorem of calculus", - "Basic integration techniques (substitution)", - "Area under a curve", - "Numerical integration (intuition)", - ], - estimatedHours: 12, - prerequisite: "derivatives", - resources: [ - { - title: "3Blue1Brown — Integration and the FTC", - type: "video", - url: "https://www.youtube.com/watch?v=rfG8ce4nNh0", - free: true, - }, - { - title: "Khan Academy — Integration", - type: "interactive", - url: "https://www.khanacademy.org/math/ap-calculus-ab/ab-integration-new", - free: true, - }, - ], - }, - { - name: "Combinatorics", - slug: "combinatorics", - whyItMatters: - "Counting is the foundation of probability. Permutations and combinations let you reason about possible outcomes systematically.", - keyConcepts: [ - "Permutations (ordered arrangements)", - "Combinations (unordered selections)", - "Binomial coefficients", - "Pigeonhole principle", - "Inclusion-exclusion principle", - ], - estimatedHours: 8, - prerequisite: "algebra", - resources: [ - { - title: "Khan Academy — Counting, Permutations, Combinations", - type: "interactive", - url: "https://www.khanacademy.org/math/statistics-probability/counting-permutations-and-combinations", - free: true, - }, - ], - }, - { - name: "Conditional Probability", - slug: "conditional-probability", - whyItMatters: - "Bayes' theorem, Naive Bayes classifiers, and probabilistic graphical models all need you to reason about conditional events.", - keyConcepts: [ - "Probability axioms (Kolmogorov)", - "Conditional probability P(A|B)", - "Bayes' theorem and its derivation", - "Law of total probability", - "Independence vs conditional independence", - ], - estimatedHours: 14, - prerequisite: "probability-basics", - resources: [ - { - title: "Khan Academy — Conditional Probability", - type: "interactive", - url: 
"https://www.khanacademy.org/math/statistics-probability/probability-library/conditional-probability-independence", - free: true, - }, - { - title: "3Blue1Brown — Bayes' Theorem", - type: "video", - url: "https://www.youtube.com/watch?v=HZGCoVF3YvM", - free: true, - }, - { - title: "Seeing Theory — Conditional Probability", - type: "interactive", - url: "https://seeing-theory.brown.edu/compound-probability/", - free: true, - }, - ], - }, - ], - }, - - // ── Level 3: Engineering / ML Prep ────────────────────────────────────── - engineering_prep: { - level: 3, - title: "Engineering / ML Prep", - subtitle: "The math you need the day before university starts", - description: - "Linear algebra, multivariable calculus, discrete math, and statistics for ML. After this level, you're ready for the Foundations tab.", - totalHours: 120, - topics: [ - { - name: "Linear Algebra", - slug: "linear-algebra", - whyItMatters: - "Neural networks, PCA, SVD, recommender systems - linear algebra is the backbone of modern ML. 
No shortcuts here.", - keyConcepts: [ - "Systems of linear equations", - "Gaussian elimination", - "Determinants", - "Eigenvalues and eigenvectors", - "Vector spaces and subspaces", - "Linear independence and basis", - "Orthogonality and projections", - ], - estimatedHours: 20, - prerequisite: "matrices-intro", - bridgesTo: [ - "Linear algebra (Foundations tab)", - "Matrix decomposition in ML", - ], - resources: [ - { - title: "3Blue1Brown — Essence of Linear Algebra (full)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab", - free: true, - }, - { - title: "MIT OCW 18.06 — Gilbert Strang", - type: "video", - url: "https://ocw.mit.edu/courses/18-06sc-linear-algebra-fall-2011/", - free: true, - }, - { - title: "Coding the Matrix (Philip Klein)", - type: "textbook", - url: "https://codingthematrix.com/", - free: false, - }, - ], - }, - { - name: "Calculus", - slug: "calculus", - whyItMatters: - "Gradient descent is multivariable calculus. Understanding partial derivatives and the chain rule is how you understand backprop.", - keyConcepts: [ - "Partial derivatives", - "Gradient vectors", - "Multivariable chain rule", - "Taylor series and approximations", - "Multiple integrals (double, triple)", - "Jacobians", - ], - estimatedHours: 20, - prerequisite: "integrals", - bridgesTo: [ - "Calculus (Foundations tab)", - "Optimization and gradient descent", - ], - resources: [ - { - title: "Khan Academy — Multivariable Calculus", - type: "interactive", - url: "https://www.khanacademy.org/math/multivariable-calculus", - free: true, - }, - { - title: "MIT OCW 18.02 — Multivariable Calculus", - type: "video", - url: "https://ocw.mit.edu/courses/18-02sc-multivariable-calculus-fall-2010/", - free: true, - }, - ], - }, - { - name: "Discrete Math", - slug: "discrete-math", - whyItMatters: - "Graph algorithms, boolean logic, proofs, and recursion - the math that makes computer science rigorous.", - keyConcepts: [ - "Propositional and 
predicate logic", - "Proof techniques (induction, contradiction)", - "Sets, relations, and functions (formal)", - "Graph theory basics (paths, cycles, trees)", - "Recurrence relations", - "Modular arithmetic", - ], - estimatedHours: 18, - prerequisite: "algebra", - bridgesTo: [ - "Algorithms (Foundations tab)", - "Data structures (Foundations tab)", - ], - resources: [ - { - title: "MIT OCW 6.042J — Mathematics for Computer Science", - type: "video", - url: "https://ocw.mit.edu/courses/6-042j-mathematics-for-computer-science-fall-2010/", - free: true, - }, - { - title: "Discrete Mathematics and Its Applications (Rosen)", - type: "textbook", - url: "https://www.mheducation.com/highered/product/discrete-mathematics-applications-rosen/M9781259676512.html", - free: false, - }, - ], - }, - { - name: "Probability & Random Variables", - slug: "probability-formal", - whyItMatters: - "Every generative model, every Bayesian method, every stochastic process needs formal probability. This is where intuition becomes math.", - keyConcepts: [ - "Probability axioms (formal)", - "Random variables (discrete and continuous)", - "Probability mass and density functions", - "Expectation and variance", - "Covariance and correlation", - "Common distributions (Bernoulli, Binomial, Poisson, Normal, Exponential)", - "Central Limit Theorem", - "Law of Large Numbers", - "Moment generating functions", - ], - estimatedHours: 22, - prerequisite: "conditional-probability", - bridgesTo: [ - "Probability and statistics (Foundations tab)", - "Bayesian methods in ML", - ], - resources: [ - { - title: "MIT OCW 6.041 — Probabilistic Systems Analysis", - type: "video", - url: "https://ocw.mit.edu/courses/6-041sc-probabilistic-systems-analysis-and-applied-probability-fall-2013/", - free: true, - }, - { - title: "Khan Academy — Random Variables", - type: "interactive", - url: "https://www.khanacademy.org/math/statistics-probability/random-variables-stats-library", - free: true, - }, - { - title: "Seeing 
Theory — Probability Distributions", - type: "interactive", - url: "https://seeing-theory.brown.edu/probability-distributions/", - free: true, - }, - ], - }, - { - name: "Optimization Basics", - slug: "optimization-basics", - whyItMatters: - "ML training is optimization. Understanding minima, maxima, convexity, and constraints is how you reason about why a model converges or doesn't.", - keyConcepts: [ - "Local vs global minima and maxima", - "Gradient descent (single variable, then multi)", - "Learning rate intuition", - "Convex vs non-convex functions", - "Constrained optimization (Lagrange multipliers intro)", - "Saddle points", - ], - estimatedHours: 14, - prerequisite: "calculus", - bridgesTo: [ - "Optimization in deep learning (Foundations tab)", - "Loss functions and training loops", - ], - resources: [ - { - title: "Khan Academy — Optimization", - type: "interactive", - url: "https://www.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/optimizing-multivariable-functions/a/maximums-minimums-and-saddle-points", - free: true, - }, - { - title: "3Blue1Brown — Gradient Descent", - type: "video", - url: "https://www.youtube.com/watch?v=IHZwWFHWa-w", - free: true, - }, - ], - }, - { - name: "Statistics for ML", - slug: "statistics-for-ml", - whyItMatters: - "Knowing when a result is meaningful vs noise. 
Hypothesis testing, confidence intervals, and regression are how you validate models and experiments.", - keyConcepts: [ - "Correlation vs causation", - "Linear regression (least squares)", - "Hypothesis testing framework", - "p-values and significance levels", - "t-tests (one-sample, two-sample, paired)", - "Confidence intervals", - "Chi-square tests (goodness of fit, independence)", - "Type I and Type II errors", - "Multiple testing correction (Bonferroni)", - ], - estimatedHours: 26, - prerequisite: "probability-formal", - bridgesTo: [ - "Statistics (Foundations tab)", - "A/B testing and experiment design", - "Model evaluation and validation", - ], - resources: [ - { - title: "StatQuest — Statistics Fundamentals (YouTube)", - type: "video", - url: "https://www.youtube.com/playlist?list=PLblh5JKOoLUK0FLuzwntyYI10UQFUhsY9", - free: true, - }, - { - title: "Khan Academy — Inferential Statistics", - type: "interactive", - url: "https://www.khanacademy.org/math/statistics-probability/significance-tests-one-sample", - free: true, - }, - { - title: "Think Stats (Allen Downey)", - type: "textbook", - url: "https://greenteapress.com/thinkstats2/", - free: true, - }, - ], - }, - ], - }, -}; diff --git a/frontend/src/views/math-bridge-progress.ts b/frontend/src/views/math-bridge-progress.ts deleted file mode 100644 index 05055d2..0000000 --- a/frontend/src/views/math-bridge-progress.ts +++ /dev/null @@ -1,245 +0,0 @@ -// ============================================================================ -// Math Bridge — Skill Progress & Readiness Logic -// ============================================================================ - -const STORAGE_KEY = "math-bridge-progress"; - -// === Types === - -export interface SkillProgress { - skillSlug: string; - confidence: 1 | 2 | 3; - lastCheckScore: number | null; - status: "locked" | "active" | "solid"; - attempts: number; - updatedAt: string; -} - -export type ProgressMap = Record; - -// === MVP Skills & Prerequisites === - 
-export const MVP_SKILLS = new Set([ - "arithmetic", - "algebra", - "functions", - "probability-basics", - "vectors-intro", - "derivatives", - "conditional-probability", - "linear-algebra", -]); - -// prerequisite slug → required before this skill is active -const PREREQUISITES: Record = { - algebra: "arithmetic", - functions: "algebra", - "vectors-intro": "algebra", - derivatives: "functions", - "conditional-probability": "probability-basics", - "linear-algebra": "vectors-intro", -}; - -// Which level each MVP skill belongs to -const SKILL_LEVEL: Record = { - arithmetic: 0, - algebra: 1, - functions: 1, - "probability-basics": 1, - "vectors-intro": 2, - derivatives: 2, - "conditional-probability": 2, - "linear-algebra": 3, -}; - -// Level tab → level number mapping -const LEVEL_NUMBERS: Record = { - core_numeracy: 0, - high_school: 1, - pre_university: 2, - engineering_prep: 3, -}; - -// === Persistence === - -export function loadProgress(): ProgressMap { - try { - const raw = localStorage.getItem(STORAGE_KEY); - if (!raw) return {}; - return JSON.parse(raw) as ProgressMap; - } catch { - return {}; - } -} - -export function saveProgress(map: ProgressMap): void { - localStorage.setItem(STORAGE_KEY, JSON.stringify(map)); -} - -// === Default state for a skill === - -function defaultProgress(slug: string): SkillProgress { - return { - skillSlug: slug, - confidence: 1, - lastCheckScore: null, - status: "active", - attempts: 0, - updatedAt: new Date().toISOString(), - }; -} - -export function getSkillProgress( - map: ProgressMap, - slug: string, -): SkillProgress { - return map[slug] ?? 
defaultProgress(slug); -} - -// === Status Calculation === - -function isSolid(p: SkillProgress): boolean { - return ( - p.lastCheckScore !== null && p.lastCheckScore >= 70 && p.confidence >= 2 - ); -} - -function isPrerequisiteMet(slug: string, map: ProgressMap): boolean { - const prereq = PREREQUISITES[slug]; - if (!prereq) return true; - const prereqProgress = map[prereq]; - if (!prereqProgress) return false; - return isSolid(prereqProgress); -} - -export function computeStatus( - slug: string, - map: ProgressMap, -): "locked" | "active" | "solid" { - const p = map[slug] ?? defaultProgress(slug); - if (isSolid(p)) return "solid"; - if (!isPrerequisiteMet(slug, map)) return "locked"; - return "active"; -} - -export function recalculateAllStatuses(map: ProgressMap): ProgressMap { - const updated = { ...map }; - for (const slug of MVP_SKILLS) { - const current = updated[slug] ?? defaultProgress(slug); - const newStatus = computeStatus(slug, updated); - updated[slug] = { ...current, status: newStatus }; - } - return updated; -} - -// === Confidence Update === - -export function setConfidence( - map: ProgressMap, - slug: string, - confidence: 1 | 2 | 3, -): ProgressMap { - const current = map[slug] ?? defaultProgress(slug); - const updated = { - ...map, - [slug]: { - ...current, - confidence, - updatedAt: new Date().toISOString(), - }, - }; - return recalculateAllStatuses(updated); -} - -// === Check Score Update === - -export function recordCheckScore( - map: ProgressMap, - slug: string, - score: number, -): ProgressMap { - const current = map[slug] ?? 
defaultProgress(slug); - const updated = { - ...map, - [slug]: { - ...current, - lastCheckScore: score, - attempts: current.attempts + 1, - updatedAt: new Date().toISOString(), - }, - }; - return recalculateAllStatuses(updated); -} - -// === Level Readiness === - -export interface LevelReadiness { - total: number; - solid: number; - percentage: number; - weakSkills: string[]; - nextRecommended: string | null; -} - -export function getLevelReadiness( - levelTab: string, - map: ProgressMap, -): LevelReadiness { - const levelNum = LEVEL_NUMBERS[levelTab]; - if (levelNum === undefined) { - return { - total: 0, - solid: 0, - percentage: 0, - weakSkills: [], - nextRecommended: null, - }; - } - - const levelSkills = [...MVP_SKILLS].filter( - (slug) => SKILL_LEVEL[slug] === levelNum, - ); - - if (levelSkills.length === 0) { - return { - total: 0, - solid: 0, - percentage: 0, - weakSkills: [], - nextRecommended: null, - }; - } - - const solidSkills = levelSkills.filter((slug) => { - const p = map[slug]; - return p && isSolid(p); - }); - - const weakSkills = levelSkills.filter((slug) => { - const status = computeStatus(slug, map); - return status === "active"; - }); - - const nextRecommended = weakSkills[0] ?? 
null; - - return { - total: levelSkills.length, - solid: solidSkills.length, - percentage: Math.round((solidSkills.length / levelSkills.length) * 100), - weakSkills, - nextRecommended, - }; -} - -// === Foundations Readiness === - -const LEVEL_3_SKILLS = [...MVP_SKILLS].filter( - (slug) => SKILL_LEVEL[slug] === 3, -); - -export function isFoundationsReady(map: ProgressMap): boolean { - return LEVEL_3_SKILLS.every((slug) => { - const p = map[slug]; - return p && isSolid(p); - }); -} From ba5bb948ff2b538bdf80302f6652bfad2d11a40b Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Tue, 17 Mar 2026 23:28:46 +0100 Subject: [PATCH 06/10] refactor(curriculum): replace track sort order with category and track ordering --- backend/api/concepts.py | 10 + backend/jobs/merge_concepts.py | 305 ++++++++++++++++++ backend/jobs/prune_orphan_concepts.py | 247 ++++++++++++++ backend/jobs/seed_applied_systems_concepts.py | 116 +++++++ .../jobs/seed_data_engineering_concepts.py | 114 +++++++ backend/jobs/seed_ml_foundations_concepts.py | 132 ++++++++ backend/jobs/seed_mlops_concepts.py | 114 +++++++ backend/jobs/seed_practical_ml_concepts.py | 113 +++++++ backend/models/curriculum.py | 6 + .../services/infrastructure/db_migrations.py | 23 ++ .../services/learning/concept_explainer.py | 62 +++- .../services/learning/curriculum_loader.py | 9 +- backend/services/learning/curriculum_store.py | 7 +- curriculum/tracks/ai-engineering.yaml | 3 + curriculum/tracks/applied-systems.yaml | 264 +++++---------- curriculum/tracks/behavioral-design.yaml | 3 + curriculum/tracks/bio-augmentation.yaml | 3 + curriculum/tracks/cognitive-toolkit.yaml | 3 + curriculum/tracks/data-engineering.yaml | 115 +++++++ curriculum/tracks/databases.yaml | 3 + curriculum/tracks/embodied-ai.yaml | 3 + curriculum/tracks/freelance-strategy.yaml | 3 + curriculum/tracks/frontend-engineering.yaml | 3 + curriculum/tracks/gpu-for-ai.yaml | 3 + curriculum/tracks/interview-prep.yaml | 3 
+ curriculum/tracks/ml-foundations.yaml | 193 +++++++++-- curriculum/tracks/mlops.yaml | 115 +++++++ curriculum/tracks/practical-ml.yaml | 113 +++++++ frontend/src/lib/api/types.ts | 13 + frontend/src/lib/learner-profile.ts | 126 ++++++++ frontend/src/views/ConceptDetailView.tsx | 99 +++++- frontend/src/views/CurriculumTracksView.tsx | 98 ++++-- 32 files changed, 2178 insertions(+), 246 deletions(-) create mode 100644 backend/jobs/merge_concepts.py create mode 100644 backend/jobs/prune_orphan_concepts.py create mode 100644 backend/jobs/seed_applied_systems_concepts.py create mode 100644 backend/jobs/seed_data_engineering_concepts.py create mode 100644 backend/jobs/seed_ml_foundations_concepts.py create mode 100644 backend/jobs/seed_mlops_concepts.py create mode 100644 backend/jobs/seed_practical_ml_concepts.py create mode 100644 curriculum/tracks/data-engineering.yaml create mode 100644 curriculum/tracks/mlops.yaml create mode 100644 curriculum/tracks/practical-ml.yaml create mode 100644 frontend/src/lib/learner-profile.ts diff --git a/backend/api/concepts.py b/backend/api/concepts.py index 30d14c1..7e77646 100644 --- a/backend/api/concepts.py +++ b/backend/api/concepts.py @@ -83,6 +83,13 @@ class StructuredExplanationResponse(BaseModel): prerequisite_note: str | None = None +class ChunkPreviewResponse(BaseModel): + source_id: str + title: str + preview: str + relevance_score: float = 0.0 + + class ExplanationResponse(BaseModel): concept_id: str concept_name: str @@ -90,6 +97,9 @@ class ExplanationResponse(BaseModel): structured: StructuredExplanationResponse | None = None source_ids: list[str] chunk_count: int + source_titles: dict[str, str] = {} + dossier_grounded: bool = False + chunk_previews: list[ChunkPreviewResponse] = [] class PracticeRequest(BaseModel): diff --git a/backend/jobs/merge_concepts.py b/backend/jobs/merge_concepts.py new file mode 100644 index 0000000..5003821 --- /dev/null +++ b/backend/jobs/merge_concepts.py @@ -0,0 +1,305 @@ +"""Merge 
duplicate concepts into canonical targets. + +Reusable script: define merge pairs, transfer all child records from +loser to winner, absorb aliases, then delete the loser. + +Usage: + python -m backend.jobs.merge_concepts # dry run (default) + python -m backend.jobs.merge_concepts --execute # actually merge +""" + +import argparse +import sqlite3 +import sys +from dataclasses import dataclass, field +from pathlib import Path + +DB_PATH = Path("data/samaritan.db") + + +@dataclass +class MergePair: + winner: str + loser: str + + +@dataclass +class MergeStats: + aliases_added: int = 0 + atoms_moved: int = 0 + atom_targets_moved: int = 0 + dossiers_skipped: int = 0 + dossiers_deleted: int = 0 + prerequisites_moved: int = 0 + practice_moved: int = 0 + progress_moved: int = 0 + curriculum_moved: int = 0 + source_count_delta: int = 0 + + +# ── Merge pairs: (winner_id, loser_id) ────────────────────────────────── +MERGE_PAIRS = [ + MergePair("principal-component-analysis", "principal-components-analysis"), + MergePair("low-rank-matrix-approximation", "matrix-low-rank-approximation"), + MergePair("reranking", "reranker"), + MergePair("transformer-architecture", "transformers"), +] + + +def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> MergeStats: + """Merge loser into winner. Returns stats of what was (or would be) done.""" + stats = MergeStats() + w, l = pair.winner, pair.loser + + # Verify both exist + winner_row = conn.execute("SELECT id, source_count FROM concepts WHERE id = ?", (w,)).fetchone() + loser_row = conn.execute("SELECT id, source_count FROM concepts WHERE id = ?", (l,)).fetchone() + if not winner_row: + print(f" ERROR: winner {w!r} not found in concepts table") + return stats + if not loser_row: + print(f" WARNING: loser {l!r} not found — already merged?") + return stats + + # 1. 
Absorb aliases: add loser's aliases pointing to winner (skip dupes) + loser_aliases = conn.execute( + "SELECT alias FROM concept_aliases WHERE concept_id = ?", (l,) + ).fetchall() + winner_aliases = { + row[0] + for row in conn.execute( + "SELECT alias FROM concept_aliases WHERE concept_id = ?", (w,) + ).fetchall() + } + for (alias,) in loser_aliases: + if alias not in winner_aliases: + stats.aliases_added += 1 + if execute: + # Delete from loser first (PK is alias+concept_id) + conn.execute( + "DELETE FROM concept_aliases WHERE alias = ? AND concept_id = ?", + (alias, l), + ) + conn.execute( + "INSERT OR IGNORE INTO concept_aliases (alias, concept_id) VALUES (?, ?)", + (alias, w), + ) + else: + # Duplicate alias — just delete loser's copy + if execute: + conn.execute( + "DELETE FROM concept_aliases WHERE alias = ? AND concept_id = ?", + (alias, l), + ) + + # 2. Move concept_atoms (concept_id) + atom_count = conn.execute( + "SELECT COUNT(*) FROM concept_atoms WHERE concept_id = ?", (l,) + ).fetchone()[0] + stats.atoms_moved = atom_count + if execute and atom_count: + conn.execute("UPDATE concept_atoms SET concept_id = ? WHERE concept_id = ?", (w, l)) + + # 3. Move concept_atoms (target_concept_id) + target_count = conn.execute( + "SELECT COUNT(*) FROM concept_atoms WHERE target_concept_id = ?", (l,) + ).fetchone()[0] + stats.atom_targets_moved = target_count + if execute and target_count: + conn.execute( + "UPDATE concept_atoms SET target_concept_id = ? WHERE target_concept_id = ?", + (w, l), + ) + + # 4. 
Handle concept_dossiers (1:1 — winner takes priority) + winner_dossier = conn.execute( + "SELECT concept_id FROM concept_dossiers WHERE concept_id = ?", (w,) + ).fetchone() + loser_dossier = conn.execute( + "SELECT concept_id FROM concept_dossiers WHERE concept_id = ?", (l,) + ).fetchone() + if loser_dossier: + if winner_dossier: + stats.dossiers_skipped = 1 # winner already has one + else: + stats.dossiers_skipped = 0 + # Transfer loser's dossier to winner + if execute: + conn.execute( + "UPDATE concept_dossiers SET concept_id = ? WHERE concept_id = ?", + (w, l), + ) + stats.dossiers_deleted = 1 if winner_dossier else 0 + if execute and winner_dossier: + conn.execute("DELETE FROM concept_dossiers WHERE concept_id = ?", (l,)) + + # 5. Move concept_prerequisites (both directions, skip dupes) + for col in ("concept_id", "prerequisite_id"): + other_col = "prerequisite_id" if col == "concept_id" else "concept_id" + rows = conn.execute( + f"SELECT {other_col} FROM concept_prerequisites WHERE {col} = ?", (l,) + ).fetchall() + for (other_id,) in rows: + # Check if winner already has this edge + if col == "concept_id": + exists = conn.execute( + "SELECT 1 FROM concept_prerequisites WHERE concept_id = ? AND prerequisite_id = ?", + (w, other_id), + ).fetchone() + else: + exists = conn.execute( + "SELECT 1 FROM concept_prerequisites WHERE concept_id = ? AND prerequisite_id = ?", + (other_id, w), + ).fetchone() + if not exists: + stats.prerequisites_moved += 1 + if execute: + conn.execute( + f"UPDATE concept_prerequisites SET {col} = ? WHERE {col} = ? AND {other_col} = ?", + (w, l, other_id), + ) + else: + if execute: + conn.execute( + f"DELETE FROM concept_prerequisites WHERE {col} = ? AND {other_col} = ?", + (l, other_id), + ) + + # 6. 
Move practice_items + practice_count = conn.execute( + "SELECT COUNT(*) FROM practice_items WHERE concept_id = ?", (l,) + ).fetchone()[0] + stats.practice_moved = practice_count + if execute and practice_count: + conn.execute("UPDATE practice_items SET concept_id = ? WHERE concept_id = ?", (w, l)) + + # 7. Move concept_progress (skip if winner already has entry for same user) + progress_rows = conn.execute( + "SELECT user_id FROM concept_progress WHERE concept_id = ?", (l,) + ).fetchall() + for (user_id,) in progress_rows: + exists = conn.execute( + "SELECT 1 FROM concept_progress WHERE user_id = ? AND concept_id = ?", + (user_id, w), + ).fetchone() + if not exists: + stats.progress_moved += 1 + if execute: + conn.execute( + "UPDATE concept_progress SET concept_id = ? WHERE user_id = ? AND concept_id = ?", + (w, user_id, l), + ) + else: + if execute: + conn.execute( + "DELETE FROM concept_progress WHERE user_id = ? AND concept_id = ?", + (user_id, l), + ) + + # 8. Move curriculum_module_concepts (skip dupes) + curriculum_rows = conn.execute( + "SELECT module_id, track_id FROM curriculum_module_concepts WHERE concept_id = ?", + (l,), + ).fetchall() + for module_id, track_id in curriculum_rows: + exists = conn.execute( + "SELECT 1 FROM curriculum_module_concepts WHERE module_id = ? AND track_id = ? AND concept_id = ?", + (module_id, track_id, w), + ).fetchone() + if not exists: + stats.curriculum_moved += 1 + if execute: + conn.execute( + "UPDATE curriculum_module_concepts SET concept_id = ? WHERE module_id = ? AND track_id = ? AND concept_id = ?", + (w, module_id, track_id, l), + ) + else: + if execute: + conn.execute( + "DELETE FROM curriculum_module_concepts WHERE module_id = ? AND track_id = ? AND concept_id = ?", + (module_id, track_id, l), + ) + + # 9. 
Update winner source_count (additive from loser, avoid double-count) + loser_source_count = loser_row[1] + if loser_source_count > 0: + stats.source_count_delta = loser_source_count + if execute: + conn.execute( + "UPDATE concepts SET source_count = source_count + ? WHERE id = ?", + (loser_source_count, w), + ) + + # 10. Delete remaining loser aliases (cleanup) then loser concept + if execute: + conn.execute("DELETE FROM concept_aliases WHERE concept_id = ?", (l,)) + conn.execute("DELETE FROM concepts WHERE id = ?", (l,)) + + return stats + + +def main(): + parser = argparse.ArgumentParser(description="Merge duplicate concepts") + parser.add_argument( + "--execute", action="store_true", help="Actually merge (default is dry run)" + ) + args = parser.parse_args() + + if not DB_PATH.exists(): + print(f"ERROR: DB not found at {DB_PATH}") + sys.exit(1) + + conn = sqlite3.connect(str(DB_PATH)) + conn.execute("PRAGMA foreign_keys = ON") + + total_before = conn.execute("SELECT COUNT(*) FROM concepts").fetchone()[0] + print(f"Total concepts before: {total_before}") + print(f"Mode: {'EXECUTE' if args.execute else 'DRY RUN'}") + print() + + total_deleted = 0 + for pair in MERGE_PAIRS: + print(f"{'Merging' if args.execute else 'Would merge'}: {pair.loser} -> {pair.winner}") + stats = merge_one(conn, pair, execute=args.execute) + total_deleted += 1 if args.execute else 0 + + changes = [] + if stats.aliases_added: + changes.append(f"aliases +{stats.aliases_added}") + if stats.atoms_moved: + changes.append(f"atoms moved {stats.atoms_moved}") + if stats.atom_targets_moved: + changes.append(f"atom targets moved {stats.atom_targets_moved}") + if stats.dossiers_skipped: + changes.append("dossier: winner kept, loser discarded") + if stats.prerequisites_moved: + changes.append(f"prerequisites moved {stats.prerequisites_moved}") + if stats.practice_moved: + changes.append(f"practice moved {stats.practice_moved}") + if stats.progress_moved: + changes.append(f"progress moved 
{stats.progress_moved}") + if stats.curriculum_moved: + changes.append(f"curriculum refs moved {stats.curriculum_moved}") + if stats.source_count_delta: + changes.append(f"source_count +{stats.source_count_delta}") + if changes: + print(f" {', '.join(changes)}") + else: + print(" aliases absorbed only") + print() + + if args.execute: + conn.commit() + + total_after = conn.execute("SELECT COUNT(*) FROM concepts").fetchone()[0] + conn.close() + + print(f"Concepts deleted: {total_before - total_after}") + print(f"Concepts remaining: {total_after}") + + if not args.execute: + print("\nDry run — no changes made. Pass --execute to merge.") + + +if __name__ == "__main__": + main() diff --git a/backend/jobs/prune_orphan_concepts.py b/backend/jobs/prune_orphan_concepts.py new file mode 100644 index 0000000..b405612 --- /dev/null +++ b/backend/jobs/prune_orphan_concepts.py @@ -0,0 +1,247 @@ +"""Prune orphan concepts that have no curriculum value. + +One-shot script. Deletes concepts in the DELETE_NOW bucket only. +Does NOT touch HOLD_FOUNDATIONAL, MERGE_REVIEW, or YAML-linked concepts. 
+ +Usage: + python -m backend.jobs.prune_orphan_concepts # dry run (default) + python -m backend.jobs.prune_orphan_concepts --execute # actually delete +""" + +import argparse +import sqlite3 +import sys +from pathlib import Path + +DB_PATH = Path("data/samaritan.db") + +# ── Final DELETE_NOW bucket (68 concepts) ──────────────────────────────── +# Original 74 minus 6 moved to HOLD_FOUNDATIONAL: +# batch-normalization, weight-initialization, hinge-loss, +# euclidean-distance, inner-product, dynamic-programming +DELETE_NOW = sorted([ + "adaptive-learning-rates", + "angular-distance", + "approximate-matrix-multiplication", + "backward-phase", + "centered-cosine-similarity", + "centered-difference-formula", + "componentization-via-services", + "condition-number", + "context-memory", + "cumulative-distribution-function", + "cur-decomposition", + "data-matrix", + "data-normalization", + "data-scaling", + "delta-rule", + "double-descent", + "em-algorithm", + "error-function", + "factor-analysis", + "feedforward-neural-networks", + "forward-phase", + "four-fundamental-subspaces", + "frobenius-norm", + "fusion-algorithms", + "gaussian-discriminant-analysis", + "generalized-linear-models", + "gradient-checking", + "gradient-operations", + "hessian", + "identity-matrix", + "independent-component-analysis", + "inverted-dropout", + "kernel-methods", + "latent-semantic-indexing", + "linear-quadratic-regulation", + "majorization-theory", + "markov-decision-processes", + "max-norm-constraints", + "mean-subtraction", + "mixture-of-gaussians", + "model-scaling", + "momentum-update", + "nesterov-momentum", + "nystrom-approximation", + "orthogonal-matrices", + "orthonormal-bases", + "outer-product", + "policy-gradient", + "policy-iteration", + "positive-semi-definite-matrix", + "prompt-injection", + "prompt-optimization", + "randomized-svd", + "rayleigh-quotient", + "relative-error", + "sanity-checks", + "separable-updates", + "soft-cosine-measure", + "square-svd", + 
"symmetric-decomposition", + "symmetric-gauge-functions", + "symmetry-breaking", + "tall-and-skinny-svd", + "term-frequency-vectors", + "triangle-inequality", + "unitarily-invariant-norms", + "value-iteration", + "whitening", +]) + +# ── Safety sets: NEVER delete these ────────────────────────────────────── +HOLD_FOUNDATIONAL = { + "deep-learning", + "supervised-learning", + "unsupervised-learning", + "reinforcement-learning", + "self-supervised-learning", + "clustering", + "naive-bayes", + "k-means", + "support-vector-machines", + "artificial-neural-networks", + "gradient", + "regularization", + "dropout", + "bias-variance-tradeoff", + "dimensionality-reduction", + # 6 moved from DELETE_NOW + "batch-normalization", + "weight-initialization", + "hinge-loss", + "euclidean-distance", + "inner-product", + "dynamic-programming", +} + +MERGE_REVIEW = { + "principal-component-analysis", + "principal-components-analysis", + "low-rank-matrix-approximation", + "matrix-low-rank-approximation", + "reranker", + "transformers", +} + + +def get_yaml_concept_ids() -> set[str]: + """Extract all concept_ids from curriculum YAML tracks.""" + import yaml + + ids: set[str] = set() + tracks_dir = Path("curriculum/tracks") + for f in tracks_dir.glob("*.yaml"): + with open(f) as fh: + data = yaml.safe_load(fh) + if not data or "modules" not in data: + continue + for module in data["modules"]: + for concept in module.get("concepts", []): + cid = concept.get("concept_id") + if cid: + ids.add(cid) + return ids + + +def main(): + parser = argparse.ArgumentParser(description="Prune orphan concepts") + parser.add_argument("--execute", action="store_true", help="Actually delete (default is dry run)") + args = parser.parse_args() + + if not DB_PATH.exists(): + print(f"ERROR: DB not found at {DB_PATH}") + sys.exit(1) + + yaml_ids = get_yaml_concept_ids() + + # ── Safety checks ──────────────────────────────────────────────────── + violations = [] + for cid in DELETE_NOW: + if cid in 
HOLD_FOUNDATIONAL: + violations.append(f" {cid} is in HOLD_FOUNDATIONAL") + if cid in MERGE_REVIEW: + violations.append(f" {cid} is in MERGE_REVIEW") + if cid in yaml_ids: + violations.append(f" {cid} is referenced in YAML tracks") + + if violations: + print("SAFETY CHECK FAILED — aborting:") + for v in violations: + print(v) + sys.exit(1) + + conn = sqlite3.connect(str(DB_PATH)) + conn.execute("PRAGMA foreign_keys = ON") + + # ── Pre-deletion counts ────────────────────────────────────────────── + total_before = conn.execute("SELECT COUNT(*) FROM concepts").fetchone()[0] + + # Verify all DELETE_NOW IDs exist + placeholders = ",".join("?" * len(DELETE_NOW)) + existing = conn.execute( + f"SELECT id FROM concepts WHERE id IN ({placeholders})", DELETE_NOW + ).fetchall() + existing_ids = {row[0] for row in existing} + missing = set(DELETE_NOW) - existing_ids + if missing: + print(f"WARNING: {len(missing)} concept IDs not found in DB (already deleted?):") + for m in sorted(missing): + print(f" {m}") + + to_delete = sorted(existing_ids) + print(f"Total concepts before: {total_before}") + print(f"Concepts to delete: {len(to_delete)}") + print(f"Mode: {'EXECUTE' if args.execute else 'DRY RUN'}") + print() + + if not args.execute: + print("Dry run — no changes made. Pass --execute to delete.") + print() + print("Would delete:") + for cid in to_delete: + print(f" {cid}") + conn.close() + return + + # ── Delete from all referencing tables ──────────────────────────────── + deleted = 0 + for cid in to_delete: + # Child tables (explicit cleanup, FK CASCADE may not be enabled) + conn.execute("DELETE FROM concept_dossiers WHERE concept_id = ?", (cid,)) + conn.execute("DELETE FROM concept_atoms WHERE concept_id = ?", (cid,)) + conn.execute("DELETE FROM concept_atoms WHERE target_concept_id = ?", (cid,)) + conn.execute("DELETE FROM practice_items WHERE concept_id = ?", (cid,)) + conn.execute("DELETE FROM concept_prerequisites WHERE concept_id = ? 
OR prerequisite_id = ?", (cid, cid)) + conn.execute("DELETE FROM concept_aliases WHERE concept_id = ?", (cid,)) + conn.execute("DELETE FROM concept_progress WHERE concept_id = ?", (cid,)) + conn.execute("DELETE FROM curriculum_module_concepts WHERE concept_id = ?", (cid,)) + # Parent row + cursor = conn.execute("DELETE FROM concepts WHERE id = ?", (cid,)) + deleted += cursor.rowcount + + conn.commit() + + # ── Post-deletion counts ───────────────────────────────────────────── + total_after = conn.execute("SELECT COUNT(*) FROM concepts").fetchone()[0] + + # Verify YAML concepts untouched + yaml_remaining = conn.execute( + f"SELECT COUNT(*) FROM concepts WHERE id IN ({','.join('?' * len(yaml_ids))})", + sorted(yaml_ids), + ).fetchone()[0] + + conn.close() + + print(f"Concepts deleted: {deleted}") + print(f"Concepts remaining: {total_after}") + print(f"YAML-linked concepts: {yaml_remaining} of {len(yaml_ids)} intact") + if yaml_remaining == len(yaml_ids): + print("CONFIRMED: No YAML-linked concepts were touched.") + else: + print("WARNING: Some YAML-linked concepts are missing!") + + +if __name__ == "__main__": + main() diff --git a/backend/jobs/seed_applied_systems_concepts.py b/backend/jobs/seed_applied_systems_concepts.py new file mode 100644 index 0000000..1563b10 --- /dev/null +++ b/backend/jobs/seed_applied_systems_concepts.py @@ -0,0 +1,116 @@ +"""Bootstrap Track 5 (Applied ML Systems) concepts and dossiers. + +Inserts 26 concept rows + real dossiers (definition, intuition, formula, +ml_usage) so the explainer and practice generator have usable content. 
+ +Usage: + python -m backend.jobs.seed_applied_systems_concepts + python -m backend.jobs.seed_applied_systems_concepts --dry-run +""" + +import argparse +import json +import logging +import sys + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +SRC = "curriculum:applied-systems-v1" + +# (id, name, description, lens, level, intuition, formula, ml_usage) +CONCEPTS = [ + # Module 1: Retrieval and Ranking + ("query-understanding", "Query Understanding", "Parsing and expanding user queries to capture intent, synonyms, and context before retrieval.", "machine_learning", "university", "Figure out what the user actually means before searching.", None, "Google query rewriting, e-commerce search intent classification."), + ("inverted-index", "Inverted Index", "Data structure mapping terms to the documents that contain them for fast keyword lookup.", "machine_learning", "university", "A reverse phone book - look up a word, get all documents that mention it.", None, "Elasticsearch, Lucene, BM25 retrieval in every search engine."), + ("semantic-search", "Semantic Search", "Retrieval using dense vector embeddings to match meaning rather than exact keywords.", "machine_learning", "university", "Find results that mean the same thing even if the words are different.", "score = cos(embed(query), embed(doc))", "Product search, documentation search, RAG systems."), + ("hybrid-retrieval", "Hybrid Retrieval", "Combining sparse keyword search and dense semantic search to get the best of both.", "machine_learning", "university", "Use keywords for precision and embeddings for meaning, then merge the results.", "score = alpha * bm25(q, d) + (1 - alpha) * cos(q, d)", "Production search at Airbnb, Spotify, most RAG pipelines."), + ("re-ranking", "Re-Ranking", "Second-stage model that re-scores a shortlist of candidates with a more expensive model.", "machine_learning", "university", "Retrieve fast and cheap, then re-score 
the top results carefully.", None, "Cross-encoder re-rankers, Cohere Rerank, Google search."), + ("learning-to-rank-pipeline", "Learning-to-Rank Pipeline", "End-to-end system that trains ranking models from logged user interactions and feature signals.", "machine_learning", "university", "Build a ranking model from click logs, features, and relevance labels, then deploy it.", None, "Search engines, ad ranking, feed ordering at LinkedIn and Meta."), + + # Module 2: Recommender Architecture + ("candidate-generation", "Candidate Generation", "First stage of a recommendation pipeline that retrieves a broad set of potentially relevant items.", "machine_learning", "university", "Cast a wide net to get a few thousand candidates from millions, fast.", None, "YouTube candidate generation, TikTok first-pass retrieval."), + ("multi-stage-ranking", "Multi-Stage Ranking", "Cascading pipeline where each stage filters and re-ranks candidates with increasing model complexity.", "machine_learning", "university", "Each stage narrows the funnel and uses a more powerful model.", None, "LinkedIn feed: retrieval -> first-pass ranker -> final ranker -> blender."), + ("real-time-personalization", "Real-Time Personalization", "Adapting recommendations to a user's behavior within the current session, not just historical data.", "machine_learning", "university", "Update what you show based on what someone just did, not just what they did last month.", None, "TikTok real-time interest updates, Amazon session-aware recs."), + ("session-based-recommendation", "Session-Based Recommendation", "Generating recommendations for anonymous or new users using only in-session signals.", "machine_learning", "university", "Recommend things when you only know what someone clicked in the last 5 minutes.", None, "E-commerce for non-logged-in users, news site personalization."), + ("multi-objective-ranking", "Multi-Objective Ranking", "Optimizing recommendations for multiple business goals simultaneously - 
engagement, revenue, diversity.", "machine_learning", "university", "Rank items to balance clicks, purchases, and showing something new.", "score = w1 * p_click + w2 * p_purchase + w3 * diversity", "Instagram Explore, Uber Eats restaurant ranking."), + + # Module 3: Experimentation and Metrics + ("controlled-experiment-design", "Controlled Experiment Design", "Designing A/B tests with proper randomization, control groups, and pre-registered hypotheses.", "machine_learning", "university", "Split users randomly, change one thing, measure the difference, and make sure it's real.", None, "Every product launch at Netflix, Google, Microsoft."), + ("metric-taxonomy", "Metric Taxonomy", "Classifying metrics into north star, guardrail, and proxy metrics with clear relationships.", "machine_learning", "university", "North star is what you optimize, guardrails are what you protect, proxies are what you measure daily.", None, "Netflix retention (north star) vs customer support volume (guardrail)."), + ("sample-size-estimation", "Sample Size Estimation", "Calculating how many users an experiment needs to detect a meaningful effect with confidence.", "machine_learning", "university", "Run the experiment long enough to tell signal from noise, but not longer than necessary.", "n = (z_alpha + z_beta)^2 * 2 * sigma^2 / delta^2", "Power analysis before every A/B test launch."), + ("novelty-primacy-effects", "Novelty and Primacy Effects", "Temporal biases where users initially over-engage with new features (novelty) or resist change (primacy).", "machine_learning", "university", "New features get extra clicks at first - wait for the excitement to wear off before deciding.", None, "Running experiments for 2+ weeks to let novelty effects decay."), + ("interleaving-experiments", "Interleaving Experiments", "Comparing ranking systems by mixing their results in a single list and observing user preferences.", "machine_learning", "university", "Show results from both systems alternated in 
one list - users vote with their clicks.", None, "Netflix, Spotify, and search engines for faster ranker comparison."), + + # Module 4: System Tradeoffs + ("latency-throughput-tradeoff", "Latency vs Throughput Tradeoff", "Fundamental tension between serving individual requests fast and processing many requests efficiently.", "machine_learning", "university", "Serving one request in 10ms or batching 100 requests at 50ms each - you pick one.", None, "Model serving: single request vs dynamic batching."), + ("batch-vs-real-time-serving", "Batch vs Real-Time Serving", "Choosing between pre-computing predictions offline and computing them on demand at request time.", "machine_learning", "university", "Pre-compute predictions for everyone overnight, or compute on the fly when they ask.", None, "Netflix pre-computes rows, Google Ads scores in real time."), + ("model-complexity-tradeoff", "Model Complexity Tradeoff", "Balancing model accuracy gains against increased latency, cost, and operational burden in production.", "machine_learning", "university", "A 1% accuracy gain isn't worth it if it triples your serving costs.", None, "Choosing logistic regression over deep nets for latency-sensitive ad ranking."), + ("feature-freshness-tradeoff", "Feature Freshness Tradeoff", "Choosing how often to recompute features based on prediction accuracy gains vs compute cost.", "machine_learning", "university", "Hourly features are better than daily, but 24x more expensive to compute.", None, "Real-time click features for feed ranking vs daily aggregates for email recs."), + ("consistency-availability-tradeoff", "Consistency vs Availability Tradeoff", "Deciding whether ML serving should favor consistent predictions or always-available responses.", "machine_learning", "university", "Show a stale recommendation or show nothing while the model catches up.", None, "Fallback to cached predictions when the model service is down."), + + # Module 5: Production Case Studies + 
("notification-ranking-system", "Notification Ranking System", "End-to-end system that decides which push notifications to send, when, and to whom.", "machine_learning", "university", "Pick the right notification from hundreds of candidates and send it at the right time.", None, "LinkedIn, Facebook, Duolingo notification optimization."), + ("fraud-detection-system", "Fraud Detection System", "Real-time classification system operating in an adversarial environment where attackers adapt to the model.", "machine_learning", "university", "Catch fraudsters who are actively trying to fool your model, in milliseconds.", None, "Stripe Radar, PayPal, bank transaction monitoring."), + ("search-autocomplete-system", "Search Autocomplete System", "Type-ahead suggestion system that predicts query completions from partial input in under 100ms.", "machine_learning", "university", "Suggest what the user is typing before they finish, fast enough that it feels instant.", None, "Google Suggest, Amazon search bar, Spotify search."), + ("dynamic-pricing-system", "Dynamic Pricing System", "Real-time price optimization balancing demand elasticity, competition, and business constraints.", "machine_learning", "university", "Set prices that maximize revenue by reacting to demand, inventory, and competitor moves.", None, "Uber surge pricing, airline tickets, hotel room rates."), + ("content-moderation-system", "Content Moderation System", "Multi-stage classification pipeline for detecting harmful content at platform scale.", "machine_learning", "university", "Automatically flag toxic content from billions of posts without blocking legitimate speech.", None, "YouTube, TikTok, Twitter content safety pipelines."), +] + + +def seed(dry_run: bool = False) -> bool: + """Seed 26 concept rows + real dossiers from Track 5 artifact.""" + from backend.services.chat.history import get_db, init_db + + init_db() + conn = get_db() + + for cid, name, desc, lens, level, intuition, formula, ml_usage in 
CONCEPTS: + # Concept row + conn.execute( + """INSERT OR REPLACE INTO concepts + (id, name, description, lens, level, source_count) + VALUES (?, ?, ?, ?, ?, 0)""", + (cid, name, desc, lens, level), + ) + + # Real dossier + formulas = ( + [{"content": formula, "content_latex": formula, + "source_id": SRC, "source_title": None}] + if formula else [] + ) + insights = [{"content": ml_usage, "source_id": SRC}] + atom_count = 2 + (1 if formula else 0) + 1 # def + intuition + formula? + insight + + conn.execute( + """INSERT OR REPLACE INTO concept_dossiers + (concept_id, formal_definition, intuitive_definition, + formulas_json, examples_json, prerequisite_claims_json, + key_insights_json, source_ids_json, + atom_count, source_count, built_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1, datetime('now'))""", + ( + cid, desc, intuition, + json.dumps(formulas), "[]", "[]", + json.dumps(insights), json.dumps([SRC]), + atom_count, + ), + ) + + if dry_run: + logger.info("Dry run — %d concepts validated, rolling back", len(CONCEPTS)) + conn.rollback() + else: + conn.commit() + logger.info("Seeded %d concepts + dossiers", len(CONCEPTS)) + + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bootstrap Track 5 concepts") + parser.add_argument("--dry-run", action="store_true") + args = parser.parse_args() + success = seed(dry_run=args.dry_run) + sys.exit(0 if success else 1) diff --git a/backend/jobs/seed_data_engineering_concepts.py b/backend/jobs/seed_data_engineering_concepts.py new file mode 100644 index 0000000..5b14a25 --- /dev/null +++ b/backend/jobs/seed_data_engineering_concepts.py @@ -0,0 +1,114 @@ +"""Bootstrap Track 4 (Data Engineering for ML Systems) concepts and dossiers. + +Inserts 24 concept rows + real dossiers (definition, intuition, formula, +ml_usage) so the explainer and practice generator have usable content. 
+ +Usage: + python -m backend.jobs.seed_data_engineering_concepts + python -m backend.jobs.seed_data_engineering_concepts --dry-run +""" + +import argparse +import json +import logging +import sys + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +SRC = "curriculum:data-engineering-v1" + +# (id, name, description, lens, level, intuition, formula, ml_usage) +CONCEPTS = [ + # Module 1: Data Modeling + ("relational-schema", "Relational Schema", "Formal definition of tables, columns, types, and constraints that structure a database.", "machine_learning", "university", "A blueprint that says what columns a table has, what types they are, and how tables connect.", None, "Source of truth for feature extraction queries."), + ("normalization-denormalization", "Normalization vs Denormalization", "Tradeoff between eliminating redundancy (normal forms) and pre-joining data for read speed.", "machine_learning", "university", "Store each fact once and join at read time, or duplicate facts so reads are fast.", None, "Normalized for OLTP ingestion, denormalized for ML feature tables."), + ("star-schema", "Star Schema", "Dimensional modeling pattern with a central fact table joined to dimension tables.", "machine_learning", "university", "One big table of events surrounded by smaller tables that describe what, who, when, and where.", None, "Standard layout for analytics warehouses feeding ML dashboards."), + ("slowly-changing-dimensions", "Slowly Changing Dimensions", "Techniques for tracking how dimension attributes change over time.", "machine_learning", "university", "When a customer moves cities, do you overwrite the old address or keep both? 
SCD answers that.", None, "Point-in-time correct features for training data."), + ("data-contracts", "Data Contracts", "Explicit agreements between data producers and consumers on schema, semantics, and SLAs.", "machine_learning", "university", "A promise from the team that writes the data about what shape it will be and when it arrives.", None, "Prevents silent schema changes from breaking feature pipelines."), + + # Module 2: Storage Systems + ("data-warehouse", "Data Warehouse", "Centralized analytical database optimized for complex queries over structured historical data.", "machine_learning", "university", "One big organized database where all your business data lives for analysis.", None, "BigQuery, Snowflake, Redshift - where most ML feature queries start."), + ("data-lake", "Data Lake", "Scalable storage layer that holds raw data in its original format until needed.", "machine_learning", "university", "Dump everything into cheap storage now, figure out how to use it later.", None, "S3, GCS, ADLS - landing zone for unstructured data before featurization."), + ("lakehouse-architecture", "Lakehouse Architecture", "Hybrid storage combining data lake scalability with data warehouse query performance and ACID guarantees.", "machine_learning", "university", "Get the cheap storage of a lake with the fast queries of a warehouse.", None, "Delta Lake, Apache Iceberg, Apache Hudi."), + ("columnar-storage", "Columnar Storage", "File format that stores data by column rather than by row for efficient analytical queries.", "machine_learning", "university", "Instead of storing row by row, store all values of one column together so aggregations are fast.", None, "Parquet, ORC - default format for large-scale feature datasets."), + ("partitioning-and-clustering", "Partitioning and Clustering", "Physical data organization strategies that reduce the amount of data scanned per query.", "machine_learning", "university", "Organize files by date or category so queries only read 
what they need instead of everything.", None, "Partition by date, cluster by user_id - critical for training data extraction speed."), + + # Module 3: Transformation Pipelines + ("batch-processing", "Batch Processing", "Processing data in large, scheduled chunks rather than one record at a time.", "machine_learning", "university", "Collect a day's worth of data, then process it all at once overnight.", None, "Spark, BigQuery scheduled queries, nightly feature refreshes."), + ("stream-processing", "Stream Processing", "Processing data continuously as it arrives, record by record or in micro-batches.", "machine_learning", "university", "Process each event the moment it happens instead of waiting for a batch.", None, "Kafka Streams, Flink, real-time feature updates for fraud detection."), + ("dbt-transformations", "dbt Transformations", "SQL-first transformation framework that versions, tests, and documents data models.", "machine_learning", "university", "Write SQL SELECT statements and dbt handles the rest - dependencies, testing, docs.", None, "Standard tool for warehouse transformations upstream of feature stores."), + ("data-quality-testing", "Data Quality Testing", "Automated assertions that validate data completeness, accuracy, and consistency after each pipeline run.", "machine_learning", "university", "After every pipeline run, check that no columns are null, no values are crazy, and row counts make sense.", None, "Great Expectations, dbt tests, Soda - catch bad data before it reaches the model."), + ("idempotent-pipelines", "Idempotent Pipelines", "Pipeline design where re-running the same job with the same input produces the same output without side effects.", "machine_learning", "university", "Run the pipeline twice and get the same result - no duplicates, no missing data.", None, "MERGE statements, upserts, partition overwrites for safe retries."), + + # Module 4: Orchestration and Reliability + ("dag-orchestration", "DAG Orchestration", "Scheduling and 
executing data workflows as directed acyclic graphs with dependency management.", "machine_learning", "university", "Define which jobs depend on which, then let a scheduler run them in the right order.", None, "Airflow, Dagster, Prefect - the backbone of every data platform."), + ("backfill-and-replay", "Backfill and Replay", "Re-processing historical data through updated pipeline logic to correct or enrich past outputs.", "machine_learning", "university", "Your pipeline had a bug last month - rerun it on old data to fix the output.", None, "Recomputing feature tables after a logic fix or schema migration."), + ("data-observability", "Data Observability", "Monitoring data pipelines for freshness, volume, schema changes, and distribution anomalies.", "machine_learning", "university", "Know when your data is late, missing, or looks different from yesterday.", None, "Monte Carlo, Elementary, custom Airflow alerts."), + ("sla-and-freshness", "SLA and Freshness", "Service level agreements defining how recent and reliable data must be for downstream consumers.", "machine_learning", "university", "A promise that the daily sales table will be ready by 6 AM with yesterday's data.", None, "Feature freshness SLAs for real-time vs batch ML systems."), + + # Module 5: Serving and Access + ("feature-store", "Feature Store", "Centralized system for computing, storing, and serving ML features consistently across training and inference.", "machine_learning", "university", "One place to manage all the inputs your models need.", None, "Feast, Tecton, Uber Michelangelo."), + ("reverse-etl", "Reverse ETL", "Syncing processed warehouse data back to operational tools like CRMs, ad platforms, and product databases.", "machine_learning", "university", "Push your warehouse insights back into the tools your team actually uses.", None, "Census, Hightouch - activate ML scores in Salesforce, Braze, etc."), + ("materialized-views", "Materialized Views", "Pre-computed query results stored as 
tables and refreshed on a schedule for fast reads.", "machine_learning", "university", "Run an expensive query once, save the result as a table, and read from that instead.", None, "Serving layer for dashboards and low-latency feature lookups."), + ("data-catalog", "Data Catalog", "Searchable inventory of all datasets, their schemas, owners, lineage, and usage metadata.", "machine_learning", "university", "A search engine for your company's data - find what exists, who owns it, and what it means.", None, "Datahub, Amundsen, OpenMetadata - find the right table for feature engineering."), + ("access-control-and-governance", "Access Control and Governance", "Policies and systems that control who can read, write, and share data across an organization.", "machine_learning", "university", "Rules about who can see what data and what they're allowed to do with it.", None, "Column-level security, PII masking, GDPR compliance for training data."), +] + + +def seed(dry_run: bool = False) -> bool: + """Seed 24 concept rows + real dossiers from Track 4 artifact.""" + from backend.services.chat.history import get_db, init_db + + init_db() + conn = get_db() + + for cid, name, desc, lens, level, intuition, formula, ml_usage in CONCEPTS: + # Concept row + conn.execute( + """INSERT OR REPLACE INTO concepts + (id, name, description, lens, level, source_count) + VALUES (?, ?, ?, ?, ?, 0)""", + (cid, name, desc, lens, level), + ) + + # Real dossier + formulas = ( + [{"content": formula, "content_latex": formula, + "source_id": SRC, "source_title": None}] + if formula else [] + ) + insights = [{"content": ml_usage, "source_id": SRC}] + atom_count = 2 + (1 if formula else 0) + 1 # def + intuition + formula? 
+ insight + + conn.execute( + """INSERT OR REPLACE INTO concept_dossiers + (concept_id, formal_definition, intuitive_definition, + formulas_json, examples_json, prerequisite_claims_json, + key_insights_json, source_ids_json, + atom_count, source_count, built_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1, datetime('now'))""", + ( + cid, desc, intuition, + json.dumps(formulas), "[]", "[]", + json.dumps(insights), json.dumps([SRC]), + atom_count, + ), + ) + + if dry_run: + logger.info("Dry run — %d concepts validated, rolling back", len(CONCEPTS)) + conn.rollback() + else: + conn.commit() + logger.info("Seeded %d concepts + dossiers", len(CONCEPTS)) + + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bootstrap Track 4 concepts") + parser.add_argument("--dry-run", action="store_true") + args = parser.parse_args() + success = seed(dry_run=args.dry_run) + sys.exit(0 if success else 1) diff --git a/backend/jobs/seed_ml_foundations_concepts.py b/backend/jobs/seed_ml_foundations_concepts.py new file mode 100644 index 0000000..46a64eb --- /dev/null +++ b/backend/jobs/seed_ml_foundations_concepts.py @@ -0,0 +1,132 @@ +"""Bootstrap Track 1 (ML Foundations) concepts and dossiers. + +Inserts 42 concept rows + real dossiers (definition, intuition, formula, +ml_usage) so the explainer and practice generator have usable content. 
+ +Usage: + python -m backend.jobs.seed_ml_foundations_concepts + python -m backend.jobs.seed_ml_foundations_concepts --dry-run +""" + +import argparse +import json +import logging +import sys + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +SRC = "curriculum:ml-foundations-v1" + +# (id, name, description, lens, level, intuition, formula, ml_usage) +CONCEPTS = [ + # Module 1: Numbers, Formulas, Functions + ("number", "Number", "Basic scalar quantity used in arithmetic and formulas.", "math", "primary", "Base quantity.", None, "All inputs, outputs, and parameters."), + ("fraction", "Fraction", "Part-over-whole representation of division.", "math", "primary", "Part of a whole.", "a / b", "Probabilities, means, rates."), + ("ratio", "Ratio", "Relative comparison between two quantities.", "math", "primary", "Relative size.", "a / b", "Scaling, normalization, uncertainty."), + ("variable", "Variable", "Symbol representing an unknown or changing quantity.", "math", "primary", "Placeholder for a value.", "x, y, w", "Features, targets, parameters."), + ("algebraic-expression", "Algebraic Expression", "Combination of variables, constants, and operations.", "math", "college", "Compact symbolic calculation.", "ax + b", "Model equations and updates."), + ("function", "Function", "Rule mapping inputs to outputs.", "math", "college", "Input-output rule.", "y = f(x)", "Models, activations, losses."), + ("exponent", "Exponent", "Power operation used for growth and squaring.", "math", "college", "Growth or squaring.", "x^p", "Squared error, regularization."), + ("logarithm", "Logarithm", "Inverse of exponentiation.", "math", "high_school", "Inverse of exponent; compresses scale.", "log(x)", "Likelihood-based losses."), + ("square-root", "Square Root", "Inverse of squaring.", "math", "college", "Undo squaring.", "sqrt(x)", "Standard deviation, standard error."), + # Module 2: Describing Data + ("mean", "Mean", 
"Arithmetic average of a set of values.", "statistics", "college", "Center of values.", "sum / count", "Baseline summary."), + ("sample-mean", "Sample Mean", "Average computed from observed sample values.", "statistics", "high_school", "Average of observed sample.", "x_bar = (1/n) * sum(x_i)", "Estimate true average."), + ("deviation-from-mean", "Deviation from Mean", "Difference between a value and the sample mean.", "statistics", "high_school", "Distance from center.", "x_i - x_bar", "Spread and association measures."), + ("variance", "Variance", "Average squared deviation from the mean.", "statistics", "high_school", "Spread in squared units.", "average[(x - x_bar)^2]", "Uncertainty and diagnostics."), + ("standard-deviation", "Standard Deviation", "Typical spread of values around the mean.", "statistics", "high_school", "Typical spread in original units.", "sd = sqrt(variance)", "Scaling and uncertainty."), + ("covariance", "Covariance", "Measure of how two variables move together.", "statistics", "high_school", "How two things move together.", "Cov(X, Y)", "Regression intuition."), + ("correlation", "Correlation", "Normalized measure of linear association.", "statistics", "high_school", "Strength of linear relationship.", "Cov / (sd_x * sd_y)", "Feature relationship checks."), + ("z-score", "Z-Score", "Distance from the mean measured in standard deviation units.", "statistics", "high_school", "How unusual a value is.", "(x - mean) / sd", "Standardization and inference."), + # Module 3: Probability and Bayes + ("probability", "Probability", "Quantification of uncertainty on a 0 to 1 scale.", "statistics", "high_school", "Chance of an event.", "P(A)", "Predictions and uncertainty."), + ("conditional-probability", "Conditional Probability", "Probability of an event given known evidence.", "statistics", "high_school", "Chance given known evidence.", "P(A|B)", "Classification and Bayes."), + ("independence", "Independence", "Condition where one event does not 
change another's probability.", "statistics", "high_school", "Knowing one thing changes nothing about another.", "P(A|B) = P(A)", "Model simplification."), + ("bayes-rule", "Bayes' Rule", "Rule for updating probabilities using evidence.", "statistics", "high_school", "Update belief with evidence.", "P(A|B) = P(B|A) * P(A) / P(B)", "Spam, diagnosis, probabilistic reasoning."), + # Module 4: From Sample to Population + ("sampling", "Sampling", "Selecting a subset of a population for analysis.", "statistics", "university", "Use a subset to learn about a whole.", None, "Training/evaluation and inference."), + ("estimator", "Estimator", "Statistic used to infer an unknown population quantity.", "statistics", "university", "Formula that guesses an unknown quantity.", "x_bar estimates mu", "Parameter estimation."), + ("standard-error", "Standard Error", "Expected variability of a sample estimate.", "statistics", "university", "How much a sample estimate moves across repeated samples.", "SE = sd / sqrt(n)", "Confidence intervals and tests."), + ("confidence-interval", "Confidence Interval", "Range likely to contain the true population value.", "statistics", "university", "Likely range for the true value.", "estimate +/- margin", "Metric uncertainty."), + ("hypothesis-test", "Hypothesis Test", "Procedure for deciding whether observed data is too unlikely under a claim.", "statistics", "university", "Check if observed difference is likely noise.", None, "A/B testing and experiment decisions."), + # Module 5: Linear Prediction + ("feature", "Feature", "Input variable used by a model.", "machine_learning", "university", "Input signal used by a model.", "x_j", "Model inputs."), + ("linear-model", "Linear Model", "Prediction from a weighted combination of features.", "machine_learning", "university", "Weighted combination of features.", "score = w * x + b", "Core prediction skeleton."), + ("linear-regression", "Linear Regression", "Model for predicting continuous targets.", 
"machine_learning", "university", "Predict a continuous value.", "y_hat = w * x + b", "Continuous prediction."), + # Module 6: Probabilities for Classification + ("sigmoid-function", "Sigmoid Function", "Smooth mapping from any score to a probability between 0 and 1.", "machine_learning", "university", "Convert score to probability.", "sigma(z) = 1 / (1 + e^(-z))", "Binary classification output."), + ("likelihood", "Likelihood", "Probability of observed data viewed as a function of parameters.", "statistics", "university", "How plausible the data is under a parameter choice.", "L(theta) = P(data | theta)", "Training objective foundation."), + ("maximum-likelihood-estimation", "Maximum Likelihood Estimation", "Parameter estimation by maximizing likelihood.", "statistics", "university", "Choose parameters that best explain data.", "argmax L(theta)", "Regression and classification fitting."), + ("logistic-regression", "Logistic Regression", "Binary classification model using linear score plus sigmoid.", "machine_learning", "university", "Linear score turned into a binary probability.", "p = sigma(w * x + b)", "Click/no-click, buy/no-buy prediction."), + # Module 7: Loss, Training, Optimization + ("squared-error", "Squared Error", "Squared difference between prediction and truth.", "machine_learning", "university", "Large mistakes hurt more.", "(y - y_hat)^2", "Regression training."), + ("mse-loss", "MSE Loss", "Average squared prediction error.", "machine_learning", "university", "Average squared error.", "mean[(y - y_hat)^2]", "Regression objective."), + ("cross-entropy-loss", "Cross-Entropy Loss", "Classification loss based on predicted probabilities.", "machine_learning", "university", "Wrong confident probabilities are punished hard.", "-[y * log(p) + (1 - y) * log(1 - p)]", "Classification objective."), + ("gradient-descent", "Gradient Descent", "Iterative parameter update method that reduces loss.", "machine_learning", "university", "Adjust parameters to reduce 
loss.", "w <- w - alpha * grad(L)", "Training loop."), + # Module 8: Generalization and Evaluation + ("overfitting", "Overfitting", "When a model fits training noise and fails on new data.", "machine_learning", "university", "Model memorizes instead of generalizing.", None, "Model selection and control."), + ("cross-validation", "Cross-Validation", "Repeated evaluation across multiple train/validation splits.", "machine_learning", "university", "Don't trust one split.", None, "Reliable model comparison."), + ("l1-regularization", "L1 Regularization", "Penalty based on absolute weight size.", "machine_learning", "university", "Push some weights to zero.", "loss + lambda * sum(|w|)", "Sparser models."), + ("l2-regularization", "L2 Regularization", "Penalty based on squared weight size.", "machine_learning", "university", "Shrink weights smoothly.", "loss + lambda * sum(w^2)", "Stability and overfitting control."), + # Module 9: Features and Practical Modeling + ("feature-engineering", "Feature Engineering", "Transforming raw variables into more useful model inputs.", "machine_learning", "university", "Make inputs more useful before modeling.", None, "Often bigger gain than changing model."), +] + + +def seed(dry_run: bool = False) -> bool: + """Seed 42 concept rows + real dossiers from Track 1 artifact.""" + from backend.services.chat.history import get_db, init_db + + init_db() + conn = get_db() + + for cid, name, desc, lens, level, intuition, formula, ml_usage in CONCEPTS: + # Concept row + conn.execute( + """INSERT OR REPLACE INTO concepts + (id, name, description, lens, level, source_count) + VALUES (?, ?, ?, ?, ?, 0)""", + (cid, name, desc, lens, level), + ) + + # Real dossier + formulas = ( + [{"content": formula, "content_latex": formula, + "source_id": SRC, "source_title": None}] + if formula else [] + ) + insights = [{"content": ml_usage, "source_id": SRC}] + atom_count = 2 + (1 if formula else 0) + 1 # def + intuition + formula? 
+ insight + + conn.execute( + """INSERT OR REPLACE INTO concept_dossiers + (concept_id, formal_definition, intuitive_definition, + formulas_json, examples_json, prerequisite_claims_json, + key_insights_json, source_ids_json, + atom_count, source_count, built_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1, datetime('now'))""", + ( + cid, desc, intuition, + json.dumps(formulas), "[]", "[]", + json.dumps(insights), json.dumps([SRC]), + atom_count, + ), + ) + + if dry_run: + logger.info("Dry run — %d concepts validated, rolling back", len(CONCEPTS)) + conn.rollback() + else: + conn.commit() + logger.info("Seeded %d concepts + dossiers", len(CONCEPTS)) + + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bootstrap Track 1 concepts") + parser.add_argument("--dry-run", action="store_true") + args = parser.parse_args() + success = seed(dry_run=args.dry_run) + sys.exit(0 if success else 1) diff --git a/backend/jobs/seed_mlops_concepts.py b/backend/jobs/seed_mlops_concepts.py new file mode 100644 index 0000000..3c1067a --- /dev/null +++ b/backend/jobs/seed_mlops_concepts.py @@ -0,0 +1,114 @@ +"""Bootstrap Track 3 (MLOps & Production ML) concepts and dossiers. + +Inserts 24 concept rows + real dossiers (definition, intuition, formula, +ml_usage) so the explainer and practice generator have usable content. 
+ +Usage: + python -m backend.jobs.seed_mlops_concepts + python -m backend.jobs.seed_mlops_concepts --dry-run +""" + +import argparse +import json +import logging +import sys + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +SRC = "curriculum:mlops-v1" + +# (id, name, description, lens, level, intuition, formula, ml_usage) +CONCEPTS = [ + # Module 1: Data Pipelines + ("data-ingestion", "Data Ingestion", "Process of collecting raw data from sources into a centralized store for ML consumption.", "machine_learning", "university", "Funnel raw data from many places into one place your model can read.", None, "Kafka streams, S3 landing zones, database CDC."), + ("data-validation", "Data Validation", "Automated checks that incoming data meets expected schema, ranges, and distributions.", "machine_learning", "university", "Catch bad data before it poisons your model.", None, "Great Expectations, TFX Data Validation, dbt tests."), + ("feature-pipeline", "Feature Pipeline", "Automated workflow that transforms raw data into model-ready features on a schedule.", "machine_learning", "university", "Turn messy raw data into clean inputs your model can use, automatically.", None, "Feast, Tecton, Airflow feature DAGs."), + ("data-versioning", "Data Versioning", "Tracking changes to datasets over time so any model can be reproduced with its exact training data.", "machine_learning", "university", "Git for your data - know exactly what your model trained on.", None, "DVC, LakeFS, Delta Lake time travel."), + ("etl-vs-elt", "ETL vs ELT", "Two data pipeline strategies: transform before loading (ETL) or load first then transform (ELT).", "machine_learning", "university", "Clean before storing vs store everything then clean what you need.", None, "dbt + BigQuery (ELT), Spark pipelines (ETL)."), + + # Module 2: Training Systems + ("experiment-tracking", "Experiment Tracking", "Recording hyperparameters, metrics, 
artifacts, and code versions for every training run.", "machine_learning", "university", "Lab notebook for ML - record everything so you can reproduce any result.", None, "MLflow, Weights & Biases, Neptune."), + ("hyperparameter-tuning", "Hyperparameter Tuning", "Systematic search for the model configuration that maximizes validation performance.", "machine_learning", "university", "Try many settings automatically instead of guessing.", None, "Optuna, Ray Tune, Bayesian optimization."), + ("distributed-training", "Distributed Training", "Splitting model training across multiple GPUs or machines to handle large models or datasets.", "machine_learning", "university", "Train on many machines at once when one is too slow.", None, "PyTorch DDP, DeepSpeed, Horovod."), + ("training-reproducibility", "Training Reproducibility", "Practices that ensure a training run produces the same model given the same inputs.", "machine_learning", "university", "Run the same experiment twice and get the same result.", None, "Fixed seeds, pinned dependencies, deterministic ops."), + ("gpu-resource-management", "GPU Resource Management", "Scheduling, sharing, and optimizing GPU compute across training jobs and teams.", "machine_learning", "university", "Make expensive GPUs available to the right jobs at the right time.", None, "Kubernetes GPU scheduling, spot instances, SLURM."), + + # Module 3: Model Serving + ("model-serialization", "Model Serialization", "Converting a trained model into a portable format for deployment.", "machine_learning", "university", "Save your model so another system can load and run it.", None, "ONNX, TorchScript, SavedModel, pickle."), + ("inference-server", "Inference Server", "Service that loads a model and handles prediction requests over HTTP or gRPC.", "machine_learning", "university", "A web server whose only job is running your model on incoming requests.", None, "Triton, TorchServe, TF Serving, BentoML."), + ("batch-vs-realtime-inference", "Batch vs 
Realtime Inference", "Tradeoff between precomputing predictions in bulk and computing them on demand.", "machine_learning", "university", "Predict everything ahead of time or wait until someone asks.", None, "Recommendation precompute vs search ranking."), + ("model-registry", "Model Registry", "Centralized catalog of trained models with metadata, lineage, and deployment status.", "machine_learning", "university", "A versioned inventory of every model you have ever trained.", None, "MLflow Registry, SageMaker Model Registry, Vertex AI."), + ("shadow-deployment", "Shadow Deployment", "Running a new model alongside the current one without serving its predictions to users.", "machine_learning", "university", "Test your new model on real traffic without anyone seeing the results.", None, "Pre-production validation, A/B test warmup."), + + # Module 4: Monitoring + ("data-drift", "Data Drift", "Change in the statistical distribution of input features between training and serving.", "machine_learning", "university", "The real world changed but your model still thinks it is 2023.", "PSI = sum((actual_i - expected_i) * ln(actual_i / expected_i))", "Feature distribution monitoring, retraining triggers."), + ("concept-drift", "Concept Drift", "Change in the relationship between inputs and the target variable over time.", "machine_learning", "university", "What used to predict success no longer does.", None, "Fraud pattern shifts, user preference changes."), + ("model-performance-monitoring", "Model Performance Monitoring", "Tracking prediction quality metrics on live traffic over time.", "machine_learning", "university", "Watch your model's accuracy in production so you know when it degrades.", None, "Prometheus + Grafana dashboards, Evidently AI."), + ("alerting-thresholds", "Alerting Thresholds", "Defining acceptable metric ranges that trigger retraining or rollback when breached.", "machine_learning", "university", "Set boundaries so the system pages you before users 
notice.", None, "SLO-based alerts, adaptive thresholds."), + + # Module 5: Lifecycle + ("ml-cicd", "ML CI/CD", "Continuous integration and deployment pipelines adapted for ML artifacts and validation.", "machine_learning", "university", "Automated testing and deployment but for models, not just code.", None, "GitHub Actions + model tests, CML, Vertex Pipelines."), + ("canary-rollout", "Canary Rollout", "Gradually shifting traffic to a new model version while monitoring for regressions.", "machine_learning", "university", "Send 5% of traffic to the new model first - if it breaks, only 5% of users are affected.", None, "Istio traffic splitting, SageMaker production variants."), + ("model-rollback", "Model Rollback", "Quickly reverting to a previous model version when the new one degrades.", "machine_learning", "university", "Hit undo on a bad deployment.", None, "Blue-green deploys, model registry version pinning."), + ("model-governance", "Model Governance", "Policies and processes for approving, auditing, and documenting models in production.", "machine_learning", "university", "Who approved this model, what data did it train on, and is it still compliant?", None, "Model cards, audit logs, EU AI Act compliance."), + ("technical-debt-ml", "Technical Debt in ML", "Hidden costs from shortcuts in ML systems that compound over time.", "machine_learning", "university", "Every quick fix in an ML system makes the next change harder.", None, "Glue code, pipeline jungles, undeclared dependencies."), +] + + +def seed(dry_run: bool = False) -> bool: + """Seed 24 concept rows + real dossiers from Track 3 artifact.""" + from backend.services.chat.history import get_db, init_db + + init_db() + conn = get_db() + + for cid, name, desc, lens, level, intuition, formula, ml_usage in CONCEPTS: + # Concept row + conn.execute( + """INSERT OR REPLACE INTO concepts + (id, name, description, lens, level, source_count) + VALUES (?, ?, ?, ?, ?, 0)""", + (cid, name, desc, lens, level), + ) + + 
# Real dossier + formulas = ( + [{"content": formula, "content_latex": formula, + "source_id": SRC, "source_title": None}] + if formula else [] + ) + insights = [{"content": ml_usage, "source_id": SRC}] + atom_count = 2 + (1 if formula else 0) + 1 # def + intuition + formula? + insight + + conn.execute( + """INSERT OR REPLACE INTO concept_dossiers + (concept_id, formal_definition, intuitive_definition, + formulas_json, examples_json, prerequisite_claims_json, + key_insights_json, source_ids_json, + atom_count, source_count, built_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1, datetime('now'))""", + ( + cid, desc, intuition, + json.dumps(formulas), "[]", "[]", + json.dumps(insights), json.dumps([SRC]), + atom_count, + ), + ) + + if dry_run: + logger.info("Dry run — %d concepts validated, rolling back", len(CONCEPTS)) + conn.rollback() + else: + conn.commit() + logger.info("Seeded %d concepts + dossiers", len(CONCEPTS)) + + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bootstrap Track 3 concepts") + parser.add_argument("--dry-run", action="store_true") + args = parser.parse_args() + success = seed(dry_run=args.dry_run) + sys.exit(0 if success else 1) diff --git a/backend/jobs/seed_practical_ml_concepts.py b/backend/jobs/seed_practical_ml_concepts.py new file mode 100644 index 0000000..95d981b --- /dev/null +++ b/backend/jobs/seed_practical_ml_concepts.py @@ -0,0 +1,113 @@ +"""Bootstrap Track 2 (Practical ML Systems) concepts and dossiers. + +Inserts 23 concept rows + real dossiers (definition, intuition, formula, +ml_usage) so the explainer and practice generator have usable content. 
+ +Usage: + python -m backend.jobs.seed_practical_ml_concepts + python -m backend.jobs.seed_practical_ml_concepts --dry-run +""" + +import argparse +import json +import logging +import sys + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +SRC = "curriculum:practical-ml-v1" + +# (id, name, description, lens, level, intuition, formula, ml_usage) +CONCEPTS = [ + # Module 1: Recommender Systems + ("collaborative-filtering", "Collaborative Filtering", "Recommendation method that predicts preferences based on similar users or items.", "machine_learning", "university", "People who liked the same things in the past will like the same things in the future.", None, "Netflix, Spotify, Amazon product recs."), + ("content-based-filtering", "Content-Based Filtering", "Recommendation method that uses item features to suggest similar items.", "machine_learning", "university", "Recommend items similar to what you already liked.", "sim(item_a, item_b) = cosine(features_a, features_b)", "News articles, YouTube videos, job listings."), + ("matrix-factorization", "Matrix Factorization", "Decomposing the user-item interaction matrix into latent factor matrices.", "machine_learning", "university", "Compress a huge table of ratings into a small set of hidden preferences.", "R ≈ U * V^T", "Netflix Prize, implicit feedback systems."), + ("hybrid-recommender", "Hybrid Recommender", "System combining collaborative and content-based signals for better recommendations.", "machine_learning", "university", "Use multiple recommendation strategies together.", None, "Production recommender systems at scale."), + ("cold-start-problem", "Cold-Start Problem", "Challenge of making recommendations when a new user or item has no interaction history.", "machine_learning", "university", "How do you recommend when you know nothing about someone?", None, "Onboarding flows, new product launches."), + + # Module 2: Ranking Systems + 
("pointwise-ranking", "Pointwise Ranking", "Learning-to-rank approach that scores each item independently.", "machine_learning", "university", "Score each result on its own, then sort.", "score = f(query, doc)", "Simple search ranking, ad CTR prediction."), + ("pairwise-ranking", "Pairwise Ranking", "Learning-to-rank approach that learns relative ordering between item pairs.", "machine_learning", "university", "Learn which of two results should come first.", "P(doc_a > doc_b) = sigma(s_a - s_b)", "RankNet, LambdaRank."), + ("listwise-ranking", "Listwise Ranking", "Learning-to-rank approach that optimizes the entire ranked list as a unit.", "machine_learning", "university", "Optimize the whole ranked list at once, not just pairs.", None, "ListNet, SoftRank, modern search engines."), + ("ndcg", "NDCG", "Normalized Discounted Cumulative Gain - metric measuring ranking quality with position-weighted relevance.", "machine_learning", "university", "Top results matter more than bottom results.", "DCG = sum(rel_i / log2(i+1)); NDCG = DCG / ideal_DCG", "Search quality, recommendation evaluation."), + ("feature-store", "Feature Store", "Centralized system for computing, storing, and serving ML features consistently across training and inference.", "machine_learning", "university", "One place to manage all the inputs your models need.", None, "Feast, Tecton, Uber Michelangelo."), + + # Module 3: Retrieval Systems + ("embedding-similarity", "Embedding Similarity", "Measuring closeness between items by comparing their dense vector representations.", "machine_learning", "university", "Similar things end up close together in vector space.", "sim(a, b) = cos(emb_a, emb_b)", "Semantic search, duplicate detection."), + ("approximate-nearest-neighbor", "Approximate Nearest Neighbor", "Algorithms that find similar vectors quickly by trading exactness for speed.", "machine_learning", "university", "Find close-enough neighbors without checking every single item.", None, "FAISS, ScaNN, 
HNSW in vector databases."), + ("two-tower-model", "Two-Tower Model", "Architecture encoding queries and items separately for efficient retrieval.", "machine_learning", "university", "Encode queries and items separately so you can pre-compute item embeddings.", "score = dot(query_tower(q), item_tower(i))", "YouTube, Google search, candidate generation."), + ("retrieval-then-rank", "Retrieval-Then-Rank", "Two-stage pipeline: fast retrieval narrows candidates, then a precise ranker orders them.", "machine_learning", "university", "First find a shortlist fast, then rank it carefully.", None, "Every large-scale search and recommendation system."), + + # Module 4: Feedback Loops + ("feedback-loop", "Feedback Loop", "Cycle where model predictions influence future training data, potentially amplifying biases.", "machine_learning", "university", "The model shapes the data it learns from next.", None, "Content recommendation, predictive policing."), + ("popularity-bias", "Popularity Bias", "Tendency of recommenders to over-recommend already popular items.", "machine_learning", "university", "Popular items get shown more, get more clicks, become even more popular.", None, "Music streaming, e-commerce, news feeds."), + ("exploration-exploitation", "Exploration vs Exploitation", "Tradeoff between recommending known-good items and discovering new ones.", "machine_learning", "university", "Show what you know works vs try something new to learn.", None, "Bandits, A/B tests, ad serving."), + ("position-bias", "Position Bias", "Users click higher-ranked results regardless of relevance, skewing training data.", "machine_learning", "university", "People click the first result even if the third is better.", None, "Search click models, unbiased learning to rank."), + + # Module 5: Evaluation Metrics + ("precision-recall", "Precision and Recall", "Precision measures correctness of positive predictions; recall measures coverage of actual positives.", "machine_learning", "university", 
"Precision: how many selected items are relevant. Recall: how many relevant items are selected.", "precision = TP / (TP + FP); recall = TP / (TP + FN)", "Spam filtering, medical diagnosis, search."), + ("roc-auc", "ROC-AUC", "Area under the ROC curve - measures a classifier's ability to separate classes across all thresholds.", "machine_learning", "university", "How well does the model separate positives from negatives overall?", "AUC = P(score(pos) > score(neg))", "Binary classification evaluation."), + ("calibration", "Calibration", "Whether predicted probabilities match observed frequencies.", "machine_learning", "university", "When the model says 70% chance, it should happen about 70% of the time.", None, "Ad CTR prediction, weather forecasting, medical risk."), + ("online-ab-testing", "Online A/B Testing", "Controlled experiment comparing model variants on live traffic.", "machine_learning", "university", "Split users into groups and measure which model version performs better.", None, "Feature launches, ranking model updates."), + ("offline-online-gap", "Offline-Online Gap", "Discrepancy between offline evaluation metrics and online business outcomes.", "machine_learning", "university", "A model that looks great on test data may not move real metrics.", None, "Every ML team deploying to production."), +] + + +def seed(dry_run: bool = False) -> bool: + """Seed 23 concept rows + real dossiers from Track 2 artifact.""" + from backend.services.chat.history import get_db, init_db + + init_db() + conn = get_db() + + for cid, name, desc, lens, level, intuition, formula, ml_usage in CONCEPTS: + # Concept row + conn.execute( + """INSERT OR REPLACE INTO concepts + (id, name, description, lens, level, source_count) + VALUES (?, ?, ?, ?, ?, 0)""", + (cid, name, desc, lens, level), + ) + + # Real dossier + formulas = ( + [{"content": formula, "content_latex": formula, + "source_id": SRC, "source_title": None}] + if formula else [] + ) + insights = [{"content": ml_usage, 
"source_id": SRC}] + atom_count = 2 + (1 if formula else 0) + 1 # def + intuition + formula? + insight + + conn.execute( + """INSERT OR REPLACE INTO concept_dossiers + (concept_id, formal_definition, intuitive_definition, + formulas_json, examples_json, prerequisite_claims_json, + key_insights_json, source_ids_json, + atom_count, source_count, built_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1, datetime('now'))""", + ( + cid, desc, intuition, + json.dumps(formulas), "[]", "[]", + json.dumps(insights), json.dumps([SRC]), + atom_count, + ), + ) + + if dry_run: + logger.info("Dry run — %d concepts validated, rolling back", len(CONCEPTS)) + conn.rollback() + else: + conn.commit() + logger.info("Seeded %d concepts + dossiers", len(CONCEPTS)) + + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bootstrap Track 2 concepts") + parser.add_argument("--dry-run", action="store_true") + args = parser.parse_args() + success = seed(dry_run=args.dry_run) + sys.exit(0 if success else 1) diff --git a/backend/models/curriculum.py b/backend/models/curriculum.py index dc0d60b..84f6175 100644 --- a/backend/models/curriculum.py +++ b/backend/models/curriculum.py @@ -40,6 +40,9 @@ class CurriculumTrackFile(BaseModel): difficulty: str track_type: str = "concept" is_published: bool = True + category: str = "" + category_order: int = 0 + track_order: int = 0 modules: list[CurriculumModuleFile] @@ -113,6 +116,9 @@ class CurriculumTrackSummary(BaseModel): difficulty: str track_type: str = "concept" is_published: bool + category: str = "" + category_order: int = 0 + track_order: int = 0 module_count: int concept_count: int resource_count: int = 0 diff --git a/backend/services/infrastructure/db_migrations.py b/backend/services/infrastructure/db_migrations.py index 4af98d8..dc6919a 100644 --- a/backend/services/infrastructure/db_migrations.py +++ b/backend/services/infrastructure/db_migrations.py @@ -3005,6 +3005,27 @@ def 
_migration_084_track_resources(conn: sqlite3.Connection) -> None: """) +def _migration_085_track_categories(conn: sqlite3.Connection) -> None: + """Add category, category_order, track_order columns to curriculum_tracks.""" + for col, typedef in [ + ("category", "TEXT NOT NULL DEFAULT ''"), + ("category_order", "INTEGER NOT NULL DEFAULT 0"), + ("track_order", "INTEGER NOT NULL DEFAULT 0"), + ]: + try: + conn.execute(f"ALTER TABLE curriculum_tracks ADD COLUMN {col} {typedef}") + except sqlite3.OperationalError: + pass # Column already exists + + +def _migration_086_drop_track_sort_order(conn: sqlite3.Connection) -> None: + """Drop deprecated sort_order column from curriculum_tracks.""" + try: + conn.execute("ALTER TABLE curriculum_tracks DROP COLUMN sort_order") + except sqlite3.OperationalError: + pass # Column already dropped + + MIGRATIONS: tuple[MigrationStep, ...] = ( MigrationStep("0001", "chat_core_tables", _migration_001_chat_core), MigrationStep("0002", "messages_max_rerank_score", _migration_002_messages_max_rerank_score), @@ -3090,6 +3111,8 @@ def _migration_084_track_resources(conn: sqlite3.Connection) -> None: MigrationStep("0082", "concept_progress_per_user", _migration_082_concept_progress_per_user), MigrationStep("0083", "concepts_lens_level", _migration_083_concepts_lens_level), MigrationStep("0084", "track_resources", _migration_084_track_resources), + MigrationStep("0085", "track_categories", _migration_085_track_categories), + MigrationStep("0086", "drop_track_sort_order", _migration_086_drop_track_sort_order), ) diff --git a/backend/services/learning/concept_explainer.py b/backend/services/learning/concept_explainer.py index 100d9ed..cf6857c 100644 --- a/backend/services/learning/concept_explainer.py +++ b/backend/services/learning/concept_explainer.py @@ -138,6 +138,13 @@ class StructuredExplanation(BaseModel): prerequisite_note: str | None = None +class ChunkPreview(BaseModel): + source_id: str + title: str + preview: str + relevance_score: 
float = 0.0 + + class ConceptExplanation(BaseModel): concept_id: str concept_name: str @@ -145,6 +152,9 @@ class ConceptExplanation(BaseModel): structured: StructuredExplanation | None = None source_ids: list[str] chunk_count: int + source_titles: dict[str, str] = {} + dossier_grounded: bool = False + chunk_previews: list[ChunkPreview] = [] async def explain_concept( @@ -174,18 +184,37 @@ async def explain_concept( ) docs, timing = retriever.retrieve_with_timing(concept_name) + scores: list[float] = timing.get("scores", []) + + # Pad scores if missing (reranker unavailable) + if len(scores) < len(docs): + scores.extend([0.0] * (len(docs) - len(scores))) + + # Filter by rerank relevance score — concept explanations need high relevance + # to avoid keyword-only matches (e.g. "Number" matching "this number is impressive") + # Concept explanations need high relevance — keyword-only matches (e.g. + # "Number" matching "this number is impressive") score 70-82%, while + # genuinely relevant chunks score 90%+. Use 0.90 to keep only real hits. 
+ CONCEPT_RELEVANCE_THRESHOLD = 0.90 + if scores: + paired = [(d, s) for d, s in zip(docs, scores) if s >= CONCEPT_RELEVANCE_THRESHOLD] + if paired: + docs, scores = [list(t) for t in zip(*paired)] + else: + # All below threshold — fall back to dossier-only + docs, scores = [], [] # Filter to chunks from sources that actually mention this concept - if source_ids: + if source_ids and docs: source_set = set(source_ids) - filtered = [ - d for d in docs + paired = [ + (d, s) for d, s in zip(docs, scores) if d.metadata.get("source_id") in source_set or d.metadata.get("video_id") in source_set ] # Fall back to all results if filtering removes everything - if filtered: - docs = filtered + if paired: + docs, scores = [list(t) for t in zip(*paired)] docs = docs[:max_chunks] @@ -200,12 +229,26 @@ async def explain_concept( # Build chunk context chunk_texts = [] - seen_sources = set() - for doc in docs: + seen_sources: set[str] = set() + source_titles: dict[str, str] = {} + chunk_previews: list[ChunkPreview] = [] + for idx, doc in enumerate(docs): source_id = doc.metadata.get("source_id") or doc.metadata.get("video_id", "unknown") title = doc.metadata.get("title", "Untitled") chunk_texts.append(f"[Source: {title} ({source_id})]\n{doc.page_content}") seen_sources.add(source_id) + source_titles[source_id] = title + raw = doc.page_content.strip() + # Strip [Source: ...] prefix if baked into chunk text + if raw.startswith("[Source:"): + close = raw.find("]") + if close != -1: + raw = raw[close + 1:].strip() + preview = raw[:200] + if len(raw) > 200: + preview += "..." 
+ chunk_score = scores[idx] if idx < len(scores) else 0.0 + chunk_previews.append(ChunkPreview(source_id=source_id, title=title, preview=preview, relevance_score=round(chunk_score, 3))) chunks_str = "\n\n---\n\n".join(chunk_texts) @@ -289,6 +332,8 @@ async def explain_concept( ) conn.commit() + dossier_grounded = dossier is not None and dossier.atom_count > 0 + return ConceptExplanation( concept_id=concept_id, concept_name=concept_name, @@ -296,6 +341,9 @@ async def explain_concept( structured=structured, source_ids=list(seen_sources), chunk_count=len(docs), + source_titles=source_titles, + dossier_grounded=dossier_grounded, + chunk_previews=chunk_previews, ) diff --git a/backend/services/learning/curriculum_loader.py b/backend/services/learning/curriculum_loader.py index 86b4630..d48aea7 100644 --- a/backend/services/learning/curriculum_loader.py +++ b/backend/services/learning/curriculum_loader.py @@ -226,10 +226,13 @@ def seed_track(track: CurriculumTrackFile, conn: sqlite3.Connection | None = Non # Insert track conn.execute( - """INSERT INTO curriculum_tracks (id, title, description, difficulty, track_type, sort_order, is_published) - VALUES (?, ?, ?, ?, ?, ?, ?)""", + """INSERT INTO curriculum_tracks + (id, title, description, difficulty, track_type, + is_published, category, category_order, track_order) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""", (track.id, track.title, track.description.strip(), track.difficulty, - track.track_type, 0, int(track.is_published)), + track.track_type, int(track.is_published), + track.category, track.category_order, track.track_order), ) # Insert modules, concept links, and resources diff --git a/backend/services/learning/curriculum_store.py b/backend/services/learning/curriculum_store.py index 44c137f..4c56eeb 100644 --- a/backend/services/learning/curriculum_store.py +++ b/backend/services/learning/curriculum_store.py @@ -56,11 +56,15 @@ def derive_readiness(conn: sqlite3.Connection, concept_id: str) -> str: ).fetchone() 
atom_type_count = type_count_row["cnt"] if type_count_row else 0 source_count = dossier["source_count"] + dossier_atom_count = dossier["atom_count"] if atom_type_count >= _RICH_MIN_ATOM_TYPES and source_count >= _RICH_MIN_SOURCES: return "rich" if atom_type_count >= _GROUNDED_MIN_ATOM_TYPES or source_count >= _GROUNDED_MIN_SOURCES: return "grounded" + # Dossier-grounded: authored dossier with real content but no concept_atoms rows + if dossier_atom_count >= _GROUNDED_MIN_ATOM_TYPES and source_count >= 1: + return "grounded" return "scaffolded" @@ -88,6 +92,7 @@ def list_tracks(published_only: bool = True) -> list[dict]: rows = conn.execute(f""" SELECT t.id, t.title, t.description, t.difficulty, t.track_type, t.is_published, + t.category, t.category_order, t.track_order, COUNT(DISTINCT m.id) as module_count, COUNT(DISTINCT mc.concept_id) as concept_count, COUNT(DISTINCT tr.id) as resource_count @@ -97,7 +102,7 @@ def list_tracks(published_only: bool = True) -> list[dict]: LEFT JOIN track_resources tr ON tr.track_id = t.id AND tr.module_id = m.id {where} GROUP BY t.id - ORDER BY t.sort_order, t.title + ORDER BY t.category_order ASC, t.track_order ASC """).fetchall() return [dict(r) for r in rows] diff --git a/curriculum/tracks/ai-engineering.yaml b/curriculum/tracks/ai-engineering.yaml index efd351a..79e3ffe 100644 --- a/curriculum/tracks/ai-engineering.yaml +++ b/curriculum/tracks/ai-engineering.yaml @@ -4,6 +4,9 @@ description: > Build production AI systems - from inference to agents to evals. 
difficulty: intermediate track_type: resource +category: systems +category_order: 2 +track_order: 2 modules: - id: inference diff --git a/curriculum/tracks/applied-systems.yaml b/curriculum/tracks/applied-systems.yaml index 662e4b0..d128cea 100644 --- a/curriculum/tracks/applied-systems.yaml +++ b/curriculum/tracks/applied-systems.yaml @@ -1,229 +1,119 @@ id: applied-systems -title: Applied Systems +title: Applied ML Systems description: > - Production ML systems beyond model training - LLMOps, recommendation engines, - data pipelines, 3D vision, and distributed training at scale. -difficulty: intermediate -track_type: resource + End-to-end ML systems in production - retrieval, ranking, + recommendations, experimentation, and system tradeoffs. +difficulty: advanced +track_type: concept +is_published: true +category: systems +category_order: 2 +track_order: 3 + modules: - - id: llmops - title: LLMOps + # ── Module 1: Retrieval and Ranking ───────────────────────────────────── + - id: retrieval-and-ranking + title: Retrieval and Ranking objective: > - Run LLMs in production without surprises - eval, tracing, guardrails, and reliability. - color: "#55cdff" + Build production retrieval pipelines from query understanding + through re-ranking, combining sparse and dense signals. + estimated_time_minutes: 60 sort_order: 1 - resources: - - name: LangSmith - url: https://www.langchain.com/langsmith - description: > - Tracing, evaluation datasets, and regression checks for LLM apps. - detail: > - Use to practice observability and experiment comparison. + color: "#55cdff" + concepts: + - concept_id: query-understanding sort_order: 1 - - name: Langfuse - url: https://langfuse.com - description: > - Open-source LLM observability, prompts, and scoring pipelines. - detail: > - Use to drill telemetry schema and incident debugging. 
+ - concept_id: inverted-index sort_order: 2 - - name: Arize Phoenix - url: https://phoenix.arize.com - description: > - LLM tracing, retrieval analysis, and quality diagnostics. - detail: > - Use for retrieval error triage and quality root-cause analysis. + - concept_id: semantic-search sort_order: 3 - - name: Weights & Biases Weave - url: https://wandb.ai/site/weave - description: > - Model/app evaluation and experiment tracking for LLM workflows. - detail: > - Use for side-by-side prompt/pipeline iteration practice. + - concept_id: hybrid-retrieval sort_order: 4 - - name: Guardrails AI - url: https://www.guardrailsai.com - description: > - Validation and policy guardrails for structured LLM outputs. - detail: > - Use to rehearse fail-safe output enforcement patterns. + - concept_id: re-ranking sort_order: 5 + - concept_id: learning-to-rank-pipeline + sort_order: 6 - - id: recsys - title: RecSys + # ── Module 2: Recommender Architecture ────────────────────────────────── + - id: recommender-architecture + title: Recommender Architecture objective: > - Build recommendations that actually convert - retrieval, ranking, and experimentation. - color: "#ffc47c" + Design multi-stage recommendation systems that balance relevance, + diversity, and latency at scale. + estimated_time_minutes: 60 sort_order: 2 - resources: - - name: Recommender Systems Handbook - url: https://link.springer.com/book/10.1007/978-1-4899-7637-6 - description: > - Classical and modern recommendation methods with system context. - detail: > - Use as foundational theory for interview tradeoff answers. + color: "#4ade80" + concepts: + - concept_id: candidate-generation sort_order: 1 - - name: ACM RecSys Conference - url: https://recsys.acm.org - description: > - Current research trends in ranking, retrieval, and personalization. - detail: > - Use to stay current on evaluation and modeling directions. 
+ - concept_id: multi-stage-ranking sort_order: 2 - - name: NVIDIA Merlin - url: https://developer.nvidia.com/merlin - description: > - Industrial recommendation stack patterns and tooling. - detail: > - Use for practical pipeline architecture examples. + - concept_id: real-time-personalization sort_order: 3 - - name: Eugene Yan (Applied RecSys) - url: https://eugeneyan.com - description: > - Production recommendation and search system case studies. - detail: > - Use for interview-ready system narratives and metrics framing. + - concept_id: session-based-recommendation sort_order: 4 - - name: Shaped Blog - url: https://www.shaped.ai/blog - description: > - Modern personalization, retrieval-ranking stacks, and online learning. - detail: > - Use for current production patterns and tradeoff examples. + - concept_id: multi-objective-ranking sort_order: 5 - - id: dataops - title: DataOps + # ── Module 3: Experimentation and Metrics ─────────────────────────────── + - id: experimentation-and-metrics + title: Experimentation and Metrics objective: > - Keep your data pipelines healthy - orchestration, quality, lineage, and reliability. - color: "#5bb86e" + Design sound experiments, choose the right metrics, and avoid + common pitfalls that lead to wrong product decisions. + estimated_time_minutes: 45 sort_order: 3 - resources: - - name: Apache Airflow - url: https://airflow.apache.org - description: > - Workflow orchestration patterns, scheduling, and operational controls. - detail: > - Use to practice DAG design and failure recovery patterns. + color: "#ffc47c" + concepts: + - concept_id: controlled-experiment-design sort_order: 1 - - name: dbt Docs - url: https://docs.getdbt.com - description: > - Transformation modeling, testing, and analytics engineering workflow. - detail: > - Use to drill model contracts, tests, and deployment workflows. 
+ - concept_id: metric-taxonomy sort_order: 2 - - name: Dagster - url: https://dagster.io - description: > - Asset-oriented orchestration and data platform software design. - detail: > - Use for lineage-aware orchestration and asset health concepts. + - concept_id: sample-size-estimation sort_order: 3 - - name: DataHub - url: https://datahubproject.io - description: > - Metadata platform and end-to-end lineage across data assets. - detail: > - Use for catalog, ownership, and governance interview scenarios. + - concept_id: novelty-primacy-effects sort_order: 4 - - name: Great Expectations - url: https://greatexpectations.io - description: > - Automated data quality assertions and validation pipelines. - detail: > - Use for reliability guardrails and data contract enforcement. + - concept_id: interleaving-experiments sort_order: 5 - - id: 3d_vision - title: 3D Vision + # ── Module 4: System Tradeoffs ────────────────────────────────────────── + - id: system-tradeoffs + title: System Tradeoffs objective: > - Reconstruct and understand 3D scenes - NeRFs, Gaussian Splatting, and spatial AI. - color: "#f472b6" + Navigate the fundamental architectural tradeoffs that shape + how ML systems behave in production. + estimated_time_minutes: 45 sort_order: 4 - resources: - - name: Nerfstudio - url: https://nerf.studio - description: > - Modular framework for NeRF development - training, visualization, and export of neural radiance fields. - detail: > - Hands-on practice with NeRF pipelines and scene reconstruction. + color: "#eb5757" + concepts: + - concept_id: latency-throughput-tradeoff sort_order: 1 - - name: gsplat - url: https://docs.gsplat.studio - description: > - Optimized 3D Gaussian Splatting library for real-time novel view synthesis. - detail: > - Study why Gaussian Splatting is overtaking NeRFs for real-time applications. 
+ - concept_id: batch-vs-real-time-serving sort_order: 2 - - name: Open3D - url: http://www.open3d.org - description: > - Open-source library for 3D data processing - point clouds, meshes, RGB-D images, and visualization. - detail: > - Essential toolkit for 3D computer vision pipelines. + - concept_id: model-complexity-tradeoff sort_order: 3 - - name: Habitat (Meta AI) - url: https://aihabitat.org - description: > - High-performance 3D simulation platform for embodied AI research - navigation, manipulation, and rearrangement. - detail: > - Study the sim-to-real pipeline and how 3D understanding enables robot navigation. + - concept_id: feature-freshness-tradeoff sort_order: 4 - - name: 3D Gaussian Splatting (Original Paper) - url: https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/ - description: > - The seminal paper on representing scenes as collections of 3D Gaussians for real-time rendering. - detail: > - Read for the core algorithm: why Gaussians, how splatting works, and the quality-speed tradeoff vs NeRFs. + - concept_id: consistency-availability-tradeoff sort_order: 5 - - id: distributed_ml - title: Distributed ML + # ── Module 5: Production Case Studies ─────────────────────────────────── + - id: production-case-studies + title: Production Case Studies objective: > - Train models across hundreds of GPUs without losing your mind - parallelism and fault tolerance. - color: "#4ade80" + Study real ML systems end-to-end - how they retrieve, rank, + personalize, and handle adversarial inputs at scale. + estimated_time_minutes: 45 sort_order: 5 - resources: - - name: DeepSpeed - url: https://www.deepspeed.ai - description: > - Microsoft's distributed training library: ZeRO stages, pipeline parallelism, mixed precision, and inference optimization. - detail: > - Study ZeRO-1/2/3 memory partitioning - it's the most asked-about distributed training concept. 
+ color: "#818cf8" + concepts: + - concept_id: notification-ranking-system sort_order: 1 - - name: Megatron-LM - url: https://github.com/NVIDIA/Megatron-LM - description: > - NVIDIA's framework for training multi-billion parameter models with tensor and pipeline parallelism. - detail: > - Study tensor parallelism (column/row splitting) and pipeline parallelism (micro-batching). + - concept_id: fraud-detection-system sort_order: 2 - - name: PyTorch FSDP - url: https://pytorch.org/docs/stable/fsdp.html - description: > - Fully Sharded Data Parallel - PyTorch-native ZeRO-3 implementation for large model training. - detail: > - Know FSDP wrapping policies, mixed precision, and activation checkpointing. + - concept_id: search-autocomplete-system sort_order: 3 - - name: Ray Train - url: https://docs.ray.io/en/latest/train/train.html - description: > - Distributed training orchestration with fault tolerance, elastic scaling, and multi-framework support. - detail: > - Study how Ray abstracts distributed training across heterogeneous clusters. + - concept_id: dynamic-pricing-system sort_order: 4 - - name: Horovod - url: https://horovod.ai - description: > - Uber's distributed training framework using ring-AllReduce for efficient gradient synchronization. - detail: > - Understand ring-AllReduce vs tree-AllReduce and how Horovod simplifies multi-GPU training. + - concept_id: content-moderation-system sort_order: 5 - - name: Scaling Laws (Chinchilla / Kaplan et al.) - url: https://arxiv.org/abs/2203.15556 - description: > - Compute-optimal training: how to allocate compute budget between model size and data volume. - detail: > - Know the Chinchilla ratio (~20 tokens per parameter) and how it changed LLM training strategies. 
- sort_order: 6 diff --git a/curriculum/tracks/behavioral-design.yaml b/curriculum/tracks/behavioral-design.yaml index 9ac22cd..c99244c 100644 --- a/curriculum/tracks/behavioral-design.yaml +++ b/curriculum/tracks/behavioral-design.yaml @@ -4,6 +4,9 @@ description: > Persuasion frameworks, engagement loops, and product psychology patterns. difficulty: intermediate track_type: resource +category: product +category_order: 3 +track_order: 1 modules: - id: frameworks title: Frameworks diff --git a/curriculum/tracks/bio-augmentation.yaml b/curriculum/tracks/bio-augmentation.yaml index be4e21b..41ec2fe 100644 --- a/curriculum/tracks/bio-augmentation.yaml +++ b/curriculum/tracks/bio-augmentation.yaml @@ -6,6 +6,9 @@ description: > of human-machine fusion. difficulty: intermediate track_type: resource +category: specialization +category_order: 4 +track_order: 2 modules: - id: foundations title: Bio Foundations diff --git a/curriculum/tracks/cognitive-toolkit.yaml b/curriculum/tracks/cognitive-toolkit.yaml index 0a1431d..2e8d55a 100644 --- a/curriculum/tracks/cognitive-toolkit.yaml +++ b/curriculum/tracks/cognitive-toolkit.yaml @@ -4,6 +4,9 @@ description: > Mental models, decision frameworks, and cognitive techniques for better thinking. difficulty: intermediate track_type: resource +category: product +category_order: 3 +track_order: 2 modules: - id: foundation title: Foundation diff --git a/curriculum/tracks/data-engineering.yaml b/curriculum/tracks/data-engineering.yaml new file mode 100644 index 0000000..33670cf --- /dev/null +++ b/curriculum/tracks/data-engineering.yaml @@ -0,0 +1,115 @@ +id: data-engineering +title: Data Engineering for ML Systems +description: > + How data is collected, modeled, transformed, stored, and served + for analytics and machine learning. 
+difficulty: intermediate +track_type: concept +is_published: true +category: core +category_order: 1 +track_order: 4 + +modules: + # ── Module 1: Data Modeling ───────────────────────────────────────────── + - id: data-modeling + title: Data Modeling + objective: > + Design schemas and relationships that make data queryable, + consistent, and ready for downstream analytics and ML. + estimated_time_minutes: 60 + sort_order: 1 + color: "#55cdff" + concepts: + - concept_id: relational-schema + sort_order: 1 + - concept_id: normalization-denormalization + sort_order: 2 + - concept_id: star-schema + sort_order: 3 + - concept_id: slowly-changing-dimensions + sort_order: 4 + - concept_id: data-contracts + sort_order: 5 + + # ── Module 2: Storage Systems ─────────────────────────────────────────── + - id: storage-systems + title: Storage Systems + objective: > + Choose the right storage layer for structured, semi-structured, + and unstructured data at different scales. + estimated_time_minutes: 60 + sort_order: 2 + color: "#4ade80" + concepts: + - concept_id: data-warehouse + sort_order: 1 + - concept_id: data-lake + sort_order: 2 + - concept_id: lakehouse-architecture + sort_order: 3 + - concept_id: columnar-storage + sort_order: 4 + - concept_id: partitioning-and-clustering + sort_order: 5 + + # ── Module 3: Transformation Pipelines ────────────────────────────────── + - id: transformation-pipelines + title: Transformation Pipelines + objective: > + Build reliable data transformations from raw ingestion + to clean, tested, production-grade tables. 
+ estimated_time_minutes: 60 + sort_order: 3 + color: "#ffc47c" + concepts: + - concept_id: batch-processing + sort_order: 1 + - concept_id: stream-processing + sort_order: 2 + - concept_id: dbt-transformations + sort_order: 3 + - concept_id: data-quality-testing + sort_order: 4 + - concept_id: idempotent-pipelines + sort_order: 5 + + # ── Module 4: Orchestration and Reliability ───────────────────────────── + - id: orchestration-and-reliability + title: Orchestration and Reliability + objective: > + Schedule, monitor, and recover data workflows so pipelines + run on time and failures are caught before they propagate. + estimated_time_minutes: 45 + sort_order: 4 + color: "#eb5757" + concepts: + - concept_id: dag-orchestration + sort_order: 1 + - concept_id: backfill-and-replay + sort_order: 2 + - concept_id: data-observability + sort_order: 3 + - concept_id: sla-and-freshness + sort_order: 4 + + # ── Module 5: Serving and Access ──────────────────────────────────────── + - id: serving-and-access + title: Serving and Access + objective: > + Deliver processed data to consumers - dashboards, APIs, + feature stores, and ML training jobs. + estimated_time_minutes: 45 + sort_order: 5 + color: "#818cf8" + concepts: + - concept_id: feature-store + sort_order: 1 + - concept_id: reverse-etl + sort_order: 2 + - concept_id: materialized-views + sort_order: 3 + - concept_id: data-catalog + sort_order: 4 + - concept_id: access-control-and-governance + sort_order: 5 diff --git a/curriculum/tracks/databases.yaml b/curriculum/tracks/databases.yaml index 934bcc1..962d500 100644 --- a/curriculum/tracks/databases.yaml +++ b/curriculum/tracks/databases.yaml @@ -5,6 +5,9 @@ description: > NoSQL tradeoffs, architecture patterns, and interview-ready knowledge. 
difficulty: intermediate track_type: resource +category: systems +category_order: 2 +track_order: 1 modules: - id: sqlite title: SQLite diff --git a/curriculum/tracks/embodied-ai.yaml b/curriculum/tracks/embodied-ai.yaml index a9744cc..540a740 100644 --- a/curriculum/tracks/embodied-ai.yaml +++ b/curriculum/tracks/embodied-ai.yaml @@ -6,6 +6,9 @@ description: > agentic automation, edge inference, and the physical AI industry. difficulty: intermediate track_type: resource +category: specialization +category_order: 4 +track_order: 1 modules: - id: world_models title: World Models diff --git a/curriculum/tracks/freelance-strategy.yaml b/curriculum/tracks/freelance-strategy.yaml index 2656e77..fad3c36 100644 --- a/curriculum/tracks/freelance-strategy.yaml +++ b/curriculum/tracks/freelance-strategy.yaml @@ -4,6 +4,9 @@ description: > Build a sustainable freelance practice - positioning, pricing, and growth. difficulty: intermediate track_type: resource +category: career +category_order: 5 +track_order: 2 modules: - id: realtime_systems title: Real-Time Systems diff --git a/curriculum/tracks/frontend-engineering.yaml b/curriculum/tracks/frontend-engineering.yaml index e5fcadd..e5920b3 100644 --- a/curriculum/tracks/frontend-engineering.yaml +++ b/curriculum/tracks/frontend-engineering.yaml @@ -5,6 +5,9 @@ description: > data fetching, performance, TypeScript, testing, and architecture at scale. difficulty: intermediate track_type: resource +category: systems +category_order: 2 +track_order: 5 modules: - id: state title: State Management diff --git a/curriculum/tracks/gpu-for-ai.yaml b/curriculum/tracks/gpu-for-ai.yaml index b547ae6..00c5ae6 100644 --- a/curriculum/tracks/gpu-for-ai.yaml +++ b/curriculum/tracks/gpu-for-ai.yaml @@ -6,6 +6,9 @@ description: > and alternatives to NVIDIA. 
difficulty: intermediate track_type: resource +category: systems +category_order: 2 +track_order: 4 modules: - id: architecture title: GPU Architecture diff --git a/curriculum/tracks/interview-prep.yaml b/curriculum/tracks/interview-prep.yaml index dbe2ffd..d1e50db 100644 --- a/curriculum/tracks/interview-prep.yaml +++ b/curriculum/tracks/interview-prep.yaml @@ -4,6 +4,9 @@ description: > Prepare for technical interviews - system design, coding, and behavioral. difficulty: intermediate track_type: resource +category: career +category_order: 5 +track_order: 1 modules: - id: faang title: FAANG Prep diff --git a/curriculum/tracks/ml-foundations.yaml b/curriculum/tracks/ml-foundations.yaml index d2d12f8..f3d4095 100644 --- a/curriculum/tracks/ml-foundations.yaml +++ b/curriculum/tracks/ml-foundations.yaml @@ -1,54 +1,195 @@ id: ml-foundations title: ML Foundations description: > - Core mathematical and algorithmic foundations needed to understand - modern machine learning systems. -difficulty: intermediate + The math, statistics, and core algorithms behind machine learning - + from arithmetic to gradient descent in 9 modules. +difficulty: beginner +track_type: concept is_published: true +category: core +category_order: 1 +track_order: 1 modules: - - id: vector-similarity - title: Vector Similarity + # ── Module 1: Numbers, Formulas, and Functions ───────────────────────── + - id: numbers-formulas-functions + title: Numbers, Formulas, and Functions objective: > - Understand how vectors represent information and how similarity - between vectors is measured. + Read and manipulate formulas, understand powers, logs, and inverse + operations - removing symbolic friction before statistics and ML. 
estimated_time_minutes: 45 sort_order: 1 + color: "#55cdff" concepts: - - concept_id: dense-vectors + - concept_id: number sort_order: 1 - - concept_id: dot-product + - concept_id: fraction sort_order: 2 - - concept_id: cosine-similarity + - concept_id: ratio sort_order: 3 - - concept_id: vector-embeddings + - concept_id: variable sort_order: 4 + - concept_id: algebraic-expression + sort_order: 5 + - concept_id: function + sort_order: 6 + - concept_id: exponent + sort_order: 7 + - concept_id: logarithm + sort_order: 8 + - concept_id: square-root + sort_order: 9 - - id: optimization - title: Optimization + # ── Module 2: Describing Data ────────────────────────────────────────── + - id: describing-data + title: Describing Data objective: > - Understand how machine learning models define objectives and how - optimization updates parameters to reduce error. + Measure center and spread, compare variables on a common scale, + and quantify linear relationships. estimated_time_minutes: 60 sort_order: 2 + color: "#4ade80" concepts: - - concept_id: loss-functions + - concept_id: mean sort_order: 1 - - concept_id: gradient-descent + - concept_id: sample-mean sort_order: 2 - - concept_id: backpropagation + - concept_id: deviation-from-mean sort_order: 3 + - concept_id: variance + sort_order: 4 + - concept_id: standard-deviation + sort_order: 5 + - concept_id: covariance + sort_order: 6 + - concept_id: correlation + sort_order: 7 + - concept_id: z-score + sort_order: 8 - - id: attention - title: Attention + # ── Module 3: Probability and Bayes ──────────────────────────────────── + - id: probability-and-bayes + title: Probability and Bayes objective: > - Understand how attention mechanisms allow models to focus on - relevant parts of the input and how multi-head attention scales - this to learn diverse relationships. - estimated_time_minutes: 45 + Reason about uncertainty, update beliefs using evidence, and + prepare for likelihood-based classification. 
+ estimated_time_minutes: 30 sort_order: 3 + color: "#ffc47c" + concepts: + - concept_id: probability + sort_order: 1 + - concept_id: conditional-probability + sort_order: 2 + - concept_id: independence + sort_order: 3 + - concept_id: bayes-rule + sort_order: 4 + + # ── Module 4: From Sample to Population ──────────────────────────────── + - id: from-sample-to-population + title: From Sample to Population + objective: > + Infer population quantities from samples, quantify uncertainty, and + judge whether observed differences are likely noise. + estimated_time_minutes: 45 + sort_order: 4 + color: "#eb5757" + concepts: + - concept_id: sampling + sort_order: 1 + - concept_id: estimator + sort_order: 2 + - concept_id: standard-error + sort_order: 3 + - concept_id: confidence-interval + sort_order: 4 + - concept_id: hypothesis-test + sort_order: 5 + + # ── Module 5: Linear Prediction ──────────────────────────────────────── + - id: linear-prediction + title: Linear Prediction + objective: > + Predict continuous values from weighted inputs, interpret feature + effects, and connect statistics to prediction. + estimated_time_minutes: 30 + sort_order: 5 + color: "#818cf8" + concepts: + - concept_id: feature + sort_order: 1 + - concept_id: linear-model + sort_order: 2 + - concept_id: linear-regression + sort_order: 3 + + # ── Module 6: Probabilities for Classification ───────────────────────── + - id: probabilities-for-classification + title: Probabilities for Classification + objective: > + Turn scores into probabilities, estimate parameters via likelihood, + and model binary outcomes. 
+ estimated_time_minutes: 45 + sort_order: 6 + color: "#f472b6" + concepts: + - concept_id: sigmoid-function + sort_order: 1 + - concept_id: likelihood + sort_order: 2 + - concept_id: maximum-likelihood-estimation + sort_order: 3 + - concept_id: logistic-regression + sort_order: 4 + + # ── Module 7: Loss, Training, and Optimization ───────────────────────── + - id: loss-training-optimization + title: Loss, Training, and Optimization + objective: > + Define task-appropriate losses, understand how models learn, and + relate prediction error to parameter updates. + estimated_time_minutes: 45 + sort_order: 7 + color: "#55cdff" + concepts: + - concept_id: squared-error + sort_order: 1 + - concept_id: mse-loss + sort_order: 2 + - concept_id: cross-entropy-loss + sort_order: 3 + - concept_id: gradient-descent + sort_order: 4 + + # ── Module 8: Generalization and Evaluation ──────────────────────────── + - id: generalization-and-evaluation + title: Generalization and Evaluation + objective: > + Detect memorization vs generalization, choose models with + validation, and control complexity with regularization. + estimated_time_minutes: 45 + sort_order: 8 + color: "#5bb86e" concepts: - - concept_id: attention-mechanism + - concept_id: overfitting sort_order: 1 - - concept_id: multi-head-attention + - concept_id: cross-validation sort_order: 2 + - concept_id: l1-regularization + sort_order: 3 + - concept_id: l2-regularization + sort_order: 4 + + # ── Module 9: Features and Practical Modeling ────────────────────────── + - id: features-and-practical-modeling + title: Features and Practical Modeling + objective: > + Transform raw data into model-ready inputs and improve model + quality through better representation. 
+ estimated_time_minutes: 20 + sort_order: 9 + color: "#ffc47c" + concepts: + - concept_id: feature-engineering + sort_order: 1 diff --git a/curriculum/tracks/mlops.yaml b/curriculum/tracks/mlops.yaml new file mode 100644 index 0000000..b265fb9 --- /dev/null +++ b/curriculum/tracks/mlops.yaml @@ -0,0 +1,115 @@ +id: mlops +title: MLOps & Production ML +description: > + How ML systems are trained, deployed, monitored, and maintained + in production - from data pipelines to model lifecycle management. +difficulty: intermediate +track_type: concept +is_published: true +category: core +category_order: 1 +track_order: 3 + +modules: + # ── Module 1: Data Pipelines ───────────────────────────────────────── + - id: data-pipelines + title: Data Pipelines + objective: > + Build reliable pipelines that ingest, validate, transform, and + version data for reproducible model training. + estimated_time_minutes: 60 + sort_order: 1 + color: "#55cdff" + concepts: + - concept_id: data-ingestion + sort_order: 1 + - concept_id: data-validation + sort_order: 2 + - concept_id: feature-pipeline + sort_order: 3 + - concept_id: data-versioning + sort_order: 4 + - concept_id: etl-vs-elt + sort_order: 5 + + # ── Module 2: Training Systems ─────────────────────────────────────── + - id: training-systems + title: Training Systems + objective: > + Run reproducible training jobs with experiment tracking, hyperparameter + tuning, and distributed compute. 
+ estimated_time_minutes: 60 + sort_order: 2 + color: "#4ade80" + concepts: + - concept_id: experiment-tracking + sort_order: 1 + - concept_id: hyperparameter-tuning + sort_order: 2 + - concept_id: distributed-training + sort_order: 3 + - concept_id: training-reproducibility + sort_order: 4 + - concept_id: gpu-resource-management + sort_order: 5 + + # ── Module 3: Model Serving ────────────────────────────────────────── + - id: model-serving + title: Model Serving + objective: > + Deploy models behind APIs with low latency, handle versioning, + and manage the gap between training and production environments. + estimated_time_minutes: 45 + sort_order: 3 + color: "#ffc47c" + concepts: + - concept_id: model-serialization + sort_order: 1 + - concept_id: inference-server + sort_order: 2 + - concept_id: batch-vs-realtime-inference + sort_order: 3 + - concept_id: model-registry + sort_order: 4 + - concept_id: shadow-deployment + sort_order: 5 + + # ── Module 4: Monitoring ───────────────────────────────────────────── + - id: monitoring + title: Monitoring + objective: > + Detect model degradation, data drift, and performance regressions + before they impact users. + estimated_time_minutes: 45 + sort_order: 4 + color: "#eb5757" + concepts: + - concept_id: data-drift + sort_order: 1 + - concept_id: concept-drift + sort_order: 2 + - concept_id: model-performance-monitoring + sort_order: 3 + - concept_id: alerting-thresholds + sort_order: 4 + + # ── Module 5: Lifecycle ────────────────────────────────────────────── + - id: lifecycle + title: Lifecycle + objective: > + Manage the full model lifecycle from development to retirement, + including CI/CD for ML, rollback, and governance. 
+ estimated_time_minutes: 45 + sort_order: 5 + color: "#818cf8" + concepts: + - concept_id: ml-cicd + sort_order: 1 + - concept_id: canary-rollout + sort_order: 2 + - concept_id: model-rollback + sort_order: 3 + - concept_id: model-governance + sort_order: 4 + - concept_id: technical-debt-ml + sort_order: 5 diff --git a/curriculum/tracks/practical-ml.yaml b/curriculum/tracks/practical-ml.yaml new file mode 100644 index 0000000..6df922c --- /dev/null +++ b/curriculum/tracks/practical-ml.yaml @@ -0,0 +1,113 @@ +id: practical-ml +title: Practical ML Systems +description: > + How production ML systems actually work - recommendations, ranking, + retrieval, feedback loops, and evaluation beyond accuracy. +difficulty: intermediate +track_type: concept +is_published: true +category: core +category_order: 1 +track_order: 2 + +modules: + # ── Module 1: Recommender Systems ───────────────────────────────────── + - id: recommender-systems + title: Recommender Systems + objective: > + Understand collaborative filtering, content-based methods, and hybrid + approaches that power product and content recommendations. + estimated_time_minutes: 60 + sort_order: 1 + color: "#55cdff" + concepts: + - concept_id: collaborative-filtering + sort_order: 1 + - concept_id: content-based-filtering + sort_order: 2 + - concept_id: matrix-factorization + sort_order: 3 + - concept_id: hybrid-recommender + sort_order: 4 + - concept_id: cold-start-problem + sort_order: 5 + + # ── Module 2: Ranking Systems ───────────────────────────────────────── + - id: ranking-systems + title: Ranking Systems + objective: > + Learn how search engines and feeds order results using pointwise, + pairwise, and listwise learning-to-rank approaches. 
+ estimated_time_minutes: 60 + sort_order: 2 + color: "#4ade80" + concepts: + - concept_id: pointwise-ranking + sort_order: 1 + - concept_id: pairwise-ranking + sort_order: 2 + - concept_id: listwise-ranking + sort_order: 3 + - concept_id: ndcg + sort_order: 4 + - concept_id: feature-store + sort_order: 5 + + # ── Module 3: Retrieval Systems ─────────────────────────────────────── + - id: retrieval-systems + title: Retrieval Systems + objective: > + Build intuition for approximate nearest neighbor search, two-tower + models, and the retrieval-then-rank pipeline used in modern systems. + estimated_time_minutes: 45 + sort_order: 3 + color: "#ffc47c" + concepts: + - concept_id: embedding-similarity + sort_order: 1 + - concept_id: approximate-nearest-neighbor + sort_order: 2 + - concept_id: two-tower-model + sort_order: 3 + - concept_id: retrieval-then-rank + sort_order: 4 + + # ── Module 4: Feedback Loops ────────────────────────────────────────── + - id: feedback-loops + title: Feedback Loops + objective: > + Recognize how model predictions shape future training data, and + learn strategies to detect and mitigate feedback loop effects. + estimated_time_minutes: 45 + sort_order: 4 + color: "#eb5757" + concepts: + - concept_id: feedback-loop + sort_order: 1 + - concept_id: popularity-bias + sort_order: 2 + - concept_id: exploration-exploitation + sort_order: 3 + - concept_id: position-bias + sort_order: 4 + + # ── Module 5: Evaluation Metrics ────────────────────────────────────── + - id: evaluation-metrics + title: Evaluation Metrics + objective: > + Go beyond accuracy to measure what matters in production - precision, + recall, calibration, and online experiment design. 
+ estimated_time_minutes: 45 + sort_order: 5 + color: "#818cf8" + concepts: + - concept_id: precision-recall + sort_order: 1 + - concept_id: roc-auc + sort_order: 2 + - concept_id: calibration + sort_order: 3 + - concept_id: online-ab-testing + sort_order: 4 + - concept_id: offline-online-gap + sort_order: 5 diff --git a/frontend/src/lib/api/types.ts b/frontend/src/lib/api/types.ts index 7013e30..2b9e0e7 100644 --- a/frontend/src/lib/api/types.ts +++ b/frontend/src/lib/api/types.ts @@ -1493,6 +1493,13 @@ export interface StructuredExplanation { prerequisite_note: string | null; } +export interface ChunkPreview { + source_id: string; + title: string; + preview: string; + relevance_score: number; +} + export interface ConceptExplanation { concept_id: string; concept_name: string; @@ -1500,6 +1507,9 @@ export interface ConceptExplanation { structured: StructuredExplanation | null; source_ids: string[]; chunk_count: number; + source_titles: Record; + dossier_grounded: boolean; + chunk_previews: ChunkPreview[]; } export interface PracticeItem { @@ -1554,6 +1564,9 @@ export interface CurriculumTrackSummary { difficulty: string; track_type: "concept" | "resource"; is_published: boolean; + category: string; + category_order: number; + track_order: number; module_count: number; concept_count: number; resource_count: number; diff --git a/frontend/src/lib/learner-profile.ts b/frontend/src/lib/learner-profile.ts new file mode 100644 index 0000000..8d814d9 --- /dev/null +++ b/frontend/src/lib/learner-profile.ts @@ -0,0 +1,126 @@ +/** + * Learner profile — single source of truth for learning preferences. + * + * Drives track ordering and (later) curriculum personalization. + * Hardcoded for now; could move to backend/DB when multi-user or + * when the profile needs to be editable from UI. 
+ */ + +// --------------------------------------------------------------------------- +// Types +// --------------------------------------------------------------------------- + +export interface LearnerProfile { + profileId: string; + primaryGoal: string; + backgroundSummary: string; + strengths: string[]; + weaknesses: string[]; + preferences: { + intuitionFirst: boolean; + practicalFirst: boolean; + theoryDepth: "low" | "medium" | "high"; + }; + focusAreas: string[]; + deEmphasize: string[]; + lastUpdated: string; +} + +// --------------------------------------------------------------------------- +// Hardcoded initial profile +// --------------------------------------------------------------------------- + +export const adrienV1: LearnerProfile = { + profileId: "adrien_v1", + primaryGoal: + "High-ROI path into data / ML systems / product-oriented engineering work", + backgroundSummary: + "Operations, systems, and business background. Not formal ML research. " + + "Practical, application-first learner.", + strengths: [ + "systems thinking", + "product intuition", + "frontend engineering", + "operational workflows", + ], + weaknesses: [ + "formal ML theory", + "advanced statistics", + "research paper reading", + ], + preferences: { + intuitionFirst: true, + practicalFirst: true, + theoryDepth: "medium", + }, + focusAreas: [ + "ml-foundations", + "ai-engineering", + "databases", + "applied-systems", + "frontend-engineering", + ], + deEmphasize: [ + "bio-augmentation", + "embodied-ai", + "behavioral-design", + "cognitive-toolkit", + ], + lastUpdated: "2026-03-17", +}; + +// --------------------------------------------------------------------------- +// Track ordering resolver +// --------------------------------------------------------------------------- + +interface TrackLike { + id: string; +} + +/** + * Deterministic track ordering driven by learner profile. + * + * Rules (applied in order): + * 1. Tracks in `focusAreas` come first, in focusAreas order. + * 2. 
Tracks in `deEmphasize` come last, in deEmphasize order. + * 3. Everything else keeps its original order in between. + * + * Tracks whose id doesn't appear in the available list are silently skipped. + */ +export function resolveTrackOrder( + profile: LearnerProfile, + tracks: T[], +): T[] { + const byId = new Map(tracks.map((t) => [t.id, t])); + + const focused: T[] = []; + const neutral: T[] = []; + const deprioritized: T[] = []; + + const focusSet = new Set(profile.focusAreas); + const deSet = new Set(profile.deEmphasize); + + // Pull focused tracks in profile-specified order + for (const id of profile.focusAreas) { + const t = byId.get(id); + if (t) focused.push(t); + } + + // Pull deprioritized tracks in profile-specified order + for (const id of profile.deEmphasize) { + const t = byId.get(id); + if (t) deprioritized.push(t); + } + + // Everything else keeps original order + for (const t of tracks) { + if (!focusSet.has(t.id) && !deSet.has(t.id)) { + neutral.push(t); + } + } + + return [...focused, ...neutral, ...deprioritized]; +} + +// Active profile — swap this when adding profile selection later +export const activeProfile = adrienV1; diff --git a/frontend/src/views/ConceptDetailView.tsx b/frontend/src/views/ConceptDetailView.tsx index a03f8fc..c96c6eb 100644 --- a/frontend/src/views/ConceptDetailView.tsx +++ b/frontend/src/views/ConceptDetailView.tsx @@ -5,6 +5,8 @@ import { ArrowLeft, BookOpen, Brain, + ChevronDown, + ChevronRight, Dumbbell, GitBranch, Eye, @@ -495,6 +497,13 @@ const markdownComponents: Record = { }; function ExplanationResult({ data }: { data: ConceptExplanation }) { + const [sourcesOpen, setSourcesOpen] = useState(false); + const navigate = useNavigate(); + + const sourceTitles = data.source_titles ?? {}; + const chunkPreviews = data.chunk_previews ?? []; + const isDossierOnly = data.dossier_grounded && data.chunk_count === 0; + return (
-
- Based on {data.chunk_count} chunk{data.chunk_count !== 1 ? "s" : ""}{" "} - from {data.source_ids.length} source - {data.source_ids.length !== 1 ? "s" : ""} + + {/* Provenance footer */} +
+ {isDossierOnly ? ( +
+ Based on internal concept dossier +
+ ) : ( + <> +
+ Based on {data.chunk_count} chunk + {data.chunk_count !== 1 ? "s" : ""} from {data.source_ids.length}{" "} + source + {data.source_ids.length !== 1 ? "s" : ""} + {data.dossier_grounded ? " + concept dossier" : ""} +
+ + {data.source_ids.length > 0 && ( + + )} + + {sourcesOpen && ( + + {data.source_ids.map((sid) => { + const title = sourceTitles[sid]; + const hasTitle = + title && title !== sid && title !== "Untitled"; + return ( +
+
+ + {hasTitle && ( + + {sid.length > 24 ? sid.slice(0, 24) + "..." : sid} + + )} +
+ {chunkPreviews + .filter((c) => c.source_id === sid) + .slice(0, 2) + .map((chunk, i) => ( +
+ + {Math.round(chunk.relevance_score * 100)}% + + {chunk.preview} +
+ ))} +
+ ); + })} +
+ )} + + )}
); diff --git a/frontend/src/views/CurriculumTracksView.tsx b/frontend/src/views/CurriculumTracksView.tsx index 248fd40..da15640 100644 --- a/frontend/src/views/CurriculumTracksView.tsx +++ b/frontend/src/views/CurriculumTracksView.tsx @@ -1,3 +1,4 @@ +import { useMemo } from "react"; import { useNavigate } from "@tanstack/react-router"; import { motion } from "motion/react"; import { @@ -16,12 +17,48 @@ import type { CurriculumTrackSummary } from "../lib/api/types"; const ACCENT = "#55cdff"; +const CATEGORY_LABELS: Record = { + core: "Core", + systems: "Systems", + product: "Product", + specialization: "Specialization", + career: "Career", +}; + const DIFFICULTY_LABEL: Record = { beginner: "Beginner", intermediate: "Intermediate", advanced: "Advanced", }; +interface CategoryGroup { + category: string; + label: string; + tracks: CurriculumTrackSummary[]; +} + +function groupByCategory(tracks: CurriculumTrackSummary[]): CategoryGroup[] { + const map = new Map(); + const orderMap = new Map(); + + for (const track of tracks) { + const cat = track.category || "uncategorized"; + if (!map.has(cat)) { + map.set(cat, []); + orderMap.set(cat, track.category_order); + } + map.get(cat)!.push(track); + } + + return Array.from(map.entries()) + .sort((a, b) => (orderMap.get(a[0]) ?? 99) - (orderMap.get(b[0]) ?? 99)) + .map(([category, items]) => ({ + category, + label: CATEGORY_LABELS[category] ?? category, + tracks: items, + })); +} + export function CurriculumTracksView() { useDocumentTitle("Learning Tracks"); const navigate = useNavigate(); @@ -33,6 +70,10 @@ export function CurriculumTracksView() { ); const tracks = data?.tracks ?? []; + const groups = useMemo(() => groupByCategory(tracks), [tracks]); + + // Global numbering across all categories + let globalIndex = 0; return ( @@ -50,20 +91,39 @@ export function CurriculumTracksView() { ) : tracks.length === 0 ? ( ) : ( -
- {tracks.map((track, i) => ( - - navigate({ - to: "/learn/tracks/$trackId", - params: { trackId: track.id }, - }) - } - /> - ))} +
+ {groups.map((group) => { + const sectionStart = globalIndex; + return ( +
+

+ {group.label} +

+
+ {group.tracks.map((track, i) => { + const cardIndex = sectionStart + i; + globalIndex = cardIndex + 1; + return ( + + navigate({ + to: "/learn/tracks/$trackId", + params: { trackId: track.id }, + }) + } + /> + ); + })} +
+
+ ); + })}
)} @@ -93,10 +153,12 @@ function TrackCard({ className="w-full rounded-[12px] border border-white/[0.05] bg-white/[0.02] p-4 text-left transition-colors duration-150 hover:border-white/[0.12] hover:bg-white/[0.035] hover:shadow-[0_1px_8px_rgba(255,255,255,0.04)] cursor-pointer" >
- + + {index + 1} +
Date: Tue, 17 Mar 2026 23:57:06 +0100 Subject: [PATCH 07/10] refactor(curriculum): audit-driven cleanup and dossier-first explanation fallback Curriculum YAML cleanup from product audit: - Track 1: remove remedial concepts (number, fraction), merge redundant concepts (sample-mean, deviation-from-mean, squared-error, l1-reg), add one-hot-encoding and normalization-scaling to Module 9 - Track 2: swap Retrieval before Ranking (pipeline order), remove feature-store (canonical home is Track 4), add embedding prerequisite - Track 3: remove etl-vs-elt (redundant with Track 4), merge alerting-thresholds and model-rollback into neighbors - Track 5: rename batch-vs-real-time-serving to match Track 3 naming Create 5 missing canonical concepts with minimal dossiers: embedding, bias-variance-tradeoff, one-hot-encoding, normalization-scaling, regularization Fix concept explanation fallback to be dossier-first: - Fetch dossier before attempting retrieval - Dossier-only concepts produce real explanations (no dead-end) - LLM fallback when neither dossier nor sources exist - Three explicit provenance modes in API and UI: dossier+retrieval, dossier_only, llm_fallback --- backend/api/concepts.py | 1 + .../jobs/seed_missing_curriculum_concepts.py | 134 ++++++++++++ .../services/learning/concept_explainer.py | 194 ++++++++++++------ curriculum/tracks/applied-systems.yaml | 2 +- curriculum/tracks/ml-foundations.yaml | 54 ++--- curriculum/tracks/mlops.yaml | 10 +- curriculum/tracks/practical-ml.yaml | 42 ++-- frontend/src/lib/api/types.ts | 1 + frontend/src/views/ConceptDetailView.tsx | 14 +- 9 files changed, 317 insertions(+), 135 deletions(-) create mode 100644 backend/jobs/seed_missing_curriculum_concepts.py diff --git a/backend/api/concepts.py b/backend/api/concepts.py index 7e77646..672437c 100644 --- a/backend/api/concepts.py +++ b/backend/api/concepts.py @@ -100,6 +100,7 @@ class ExplanationResponse(BaseModel): source_titles: dict[str, str] = {} dossier_grounded: bool = False 
chunk_previews: list[ChunkPreviewResponse] = [] + explanation_source: str = "dossier+retrieval" class PracticeRequest(BaseModel): diff --git a/backend/jobs/seed_missing_curriculum_concepts.py b/backend/jobs/seed_missing_curriculum_concepts.py new file mode 100644 index 0000000..b22766d --- /dev/null +++ b/backend/jobs/seed_missing_curriculum_concepts.py @@ -0,0 +1,134 @@ +"""Bootstrap missing canonical concepts required by curriculum tracks. + +Creates concept rows + minimal dossiers for concepts that exist in +curriculum YAML but are missing from the canonical registry or lack dossiers. + +Usage: + python -m backend.jobs.seed_missing_curriculum_concepts + python -m backend.jobs.seed_missing_curriculum_concepts --dry-run +""" + +import argparse +import json +import logging +import sys + +logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") +logger = logging.getLogger(__name__) + +SRC = "curriculum:missing-concepts-v1" + +# (id, name, description, lens, level, intuition, formula, ml_usage) +CONCEPTS = [ + ( + "embedding", + "Embedding", + "Learned dense vector representation that maps discrete items into continuous space.", + "machine_learning", + "university", + "Turn a word, user, or item into a list of numbers that captures its meaning.", + "embed(x) -> R^d", + "Retrieval, recommendations, semantic search, language models.", + ), + ( + "bias-variance-tradeoff", + "Bias-Variance Tradeoff", + "Fundamental tension between underfitting (high bias) and overfitting (high variance) in model selection.", + "machine_learning", + "university", + "Simple models miss patterns; complex models memorize noise. 
The sweet spot is in between.", + "Error = Bias^2 + Variance + Noise", + "Model selection, regularization strength, ensemble methods.", + ), + ( + "one-hot-encoding", + "One-Hot Encoding", + "Representation of a categorical variable as a binary vector with exactly one 1.", + "machine_learning", + "university", + "Turn a category like 'red' into [1, 0, 0] so a model can use it as a number.", + "[0, ..., 1, ..., 0]", + "Categorical feature preprocessing for linear models and trees.", + ), + ( + "normalization-scaling", + "Normalization and Scaling", + "Transforming features to a common range or distribution before modeling.", + "machine_learning", + "university", + "Put all features on the same scale so no single feature dominates by magnitude.", + "z = (x - mean) / std or x' = (x - min) / (max - min)", + "Gradient-based training, distance-based models, convergence speed.", + ), + ( + "regularization", + "Regularization", + "Technique that penalizes model complexity to reduce overfitting.", + "machine_learning", + "university", + "Add a cost for large weights so the model stays simple and generalizes better.", + "loss + lambda * penalty(w)", + "L1 sparsity, L2 shrinkage, dropout, early stopping.", + ), +] + + +def seed(dry_run: bool = False) -> bool: + """Seed missing concept rows + minimal dossiers.""" + from backend.services.chat.history import get_db, init_db + + init_db() + conn = get_db() + + created = 0 + for cid, name, desc, lens, level, intuition, formula, ml_usage in CONCEPTS: + # Concept row (INSERT OR REPLACE to handle existing-without-dossier cases) + conn.execute( + """INSERT OR REPLACE INTO concepts + (id, name, description, lens, level, source_count) + VALUES (?, ?, ?, ?, ?, 0)""", + (cid, name, desc, lens, level), + ) + + # Minimal dossier + formulas = ( + [{"content": formula, "content_latex": formula, + "source_id": SRC, "source_title": None}] + if formula else [] + ) + insights = [{"content": ml_usage, "source_id": SRC}] + atom_count = 2 + (1 if 
formula else 0) + 1 # def + intuition + formula? + insight + + conn.execute( + """INSERT OR REPLACE INTO concept_dossiers + (concept_id, formal_definition, intuitive_definition, + formulas_json, examples_json, prerequisite_claims_json, + key_insights_json, source_ids_json, + atom_count, source_count, built_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 1, datetime('now'))""", + ( + cid, desc, intuition, + json.dumps(formulas), "[]", "[]", + json.dumps(insights), json.dumps([SRC]), + atom_count, + ), + ) + created += 1 + logger.info(" %s: concept + dossier", cid) + + if dry_run: + logger.info("Dry run - %d concepts validated, rolling back", created) + conn.rollback() + else: + conn.commit() + logger.info("Seeded %d concepts + dossiers", created) + + return True + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Bootstrap missing curriculum concepts") + parser.add_argument("--dry-run", action="store_true") + args = parser.parse_args() + success = seed(dry_run=args.dry_run) + sys.exit(0 if success else 1) diff --git a/backend/services/learning/concept_explainer.py b/backend/services/learning/concept_explainer.py index cf6857c..bfb5f9d 100644 --- a/backend/services/learning/concept_explainer.py +++ b/backend/services/learning/concept_explainer.py @@ -41,6 +41,21 @@ - If the source material is thin, say so honestly - Keep it concise - this is a focused explanation, not a textbook chapter""" +LLM_FALLBACK_PROMPT = """You are an expert tutor explaining a concept to a motivated learner. + +CONCEPT: {concept_name} + +You have no source material for this concept. Generate a clear, accurate explanation from your training knowledge. + +Structure your response as: +1. **Direct answer** (2-3 sentences explaining what this concept is and why it matters) +2. **How it works** (bullet points covering the key mechanics or principles) +3. 
**Example** (one concrete example) + +Rules: +- Be accurate and concise +- Keep it focused - this is a brief explanation, not a textbook chapter""" + PROFILED_EXPLAIN_PROMPT = """You are a tutor explaining a concept. Your teaching adapts to the learner's level and domain lens. CONCEPT: {concept_name} @@ -155,6 +170,8 @@ class ConceptExplanation(BaseModel): source_titles: dict[str, str] = {} dossier_grounded: bool = False chunk_previews: list[ChunkPreview] = [] + explanation_source: str = "dossier+retrieval" + # "dossier+retrieval" | "dossier_only" | "llm_fallback" async def explain_concept( @@ -167,67 +184,65 @@ async def explain_concept( ) -> ConceptExplanation: """Generate a grounded explanation for a concept. - Retrieves relevant chunks from the concept's sources, then asks - the LLM to synthesize an explanation using only that material. + Priority order: + 1. Dossier (>= 3 atoms) as primary source, retrieval as enrichment + 2. Dossier-only if retrieval finds nothing relevant + 3. LLM fallback if dossier is too thin AND no retrieval """ + from backend.services.learning.dossier_builder import get_dossier + + # Step 1: Fetch dossier first — it's always available for seeded concepts + dossier = get_dossier(concept_id) + has_dossier = dossier is not None and dossier.atom_count >= 2 + has_rich_dossier = dossier is not None and dossier.atom_count >= 3 + + # Step 2: Attempt retrieval (enrichment, not required) + docs: list = [] + scores: list[float] = [] from backend.services.retrieval import get_retriever - # Retrieve chunks relevant to this concept retriever = get_retriever("All") - if not retriever: - return ConceptExplanation( - concept_id=concept_id, - concept_name=concept_name, - explanation=f"No source material available for '{concept_name}'. 
Ingest content that covers this concept first.", - source_ids=[], - chunk_count=0, + if retriever: + docs, timing = retriever.retrieve_with_timing(concept_name) + scores = timing.get("scores", []) + + # Pad scores if missing (reranker unavailable) + if len(scores) < len(docs): + scores.extend([0.0] * (len(docs) - len(scores))) + + # Filter by rerank relevance score — concept explanations need high relevance + # to avoid keyword-only matches (e.g. "Number" matching "this number is impressive") + CONCEPT_RELEVANCE_THRESHOLD = 0.90 + if scores: + paired = [(d, s) for d, s in zip(docs, scores) if s >= CONCEPT_RELEVANCE_THRESHOLD] + if paired: + docs, scores = [list(t) for t in zip(*paired)] + else: + docs, scores = [], [] + + # Filter to chunks from sources that actually mention this concept + if source_ids and docs: + source_set = set(source_ids) + paired = [ + (d, s) for d, s in zip(docs, scores) + if d.metadata.get("source_id") in source_set + or d.metadata.get("video_id") in source_set + ] + if paired: + docs, scores = [list(t) for t in zip(*paired)] + + docs = docs[:max_chunks] + + # Step 3: Decide explanation strategy + has_chunks = len(docs) > 0 + + if not has_dossier and not has_chunks: + # LLM fallback — no grounding material at all + return await _llm_fallback_explanation( + concept_id, concept_name, teaching_profile_id, teaching_lens, ) - docs, timing = retriever.retrieve_with_timing(concept_name) - scores: list[float] = timing.get("scores", []) - - # Pad scores if missing (reranker unavailable) - if len(scores) < len(docs): - scores.extend([0.0] * (len(docs) - len(scores))) - - # Filter by rerank relevance score — concept explanations need high relevance - # to avoid keyword-only matches (e.g. "Number" matching "this number is impressive") - # Concept explanations need high relevance — keyword-only matches (e.g. - # "Number" matching "this number is impressive") score 70-82%, while - # genuinely relevant chunks score 90%+. Use 0.90 to keep only real hits. 
- CONCEPT_RELEVANCE_THRESHOLD = 0.90 - if scores: - paired = [(d, s) for d, s in zip(docs, scores) if s >= CONCEPT_RELEVANCE_THRESHOLD] - if paired: - docs, scores = [list(t) for t in zip(*paired)] - else: - # All below threshold — fall back to dossier-only - docs, scores = [], [] - - # Filter to chunks from sources that actually mention this concept - if source_ids and docs: - source_set = set(source_ids) - paired = [ - (d, s) for d, s in zip(docs, scores) - if d.metadata.get("source_id") in source_set - or d.metadata.get("video_id") in source_set - ] - # Fall back to all results if filtering removes everything - if paired: - docs, scores = [list(t) for t in zip(*paired)] - - docs = docs[:max_chunks] - - if not docs: - return ConceptExplanation( - concept_id=concept_id, - concept_name=concept_name, - explanation=f"No relevant source chunks found for '{concept_name}'. The concept exists in your learnings but the original source material may not contain enough detail.", - source_ids=[], - chunk_count=0, - ) - - # Build chunk context + # Build chunk context (may be empty — that's fine) chunk_texts = [] seen_sources: set[str] = set() source_titles: dict[str, str] = {} @@ -239,7 +254,6 @@ async def explain_concept( seen_sources.add(source_id) source_titles[source_id] = title raw = doc.page_content.strip() - # Strip [Source: ...] prefix if baked into chunk text if raw.startswith("[Source:"): close = raw.find("]") if close != -1: @@ -250,15 +264,11 @@ async def explain_concept( chunk_score = scores[idx] if idx < len(scores) else 0.0 chunk_previews.append(ChunkPreview(source_id=source_id, title=title, preview=preview, relevance_score=round(chunk_score, 3))) - chunks_str = "\n\n---\n\n".join(chunk_texts) + chunks_str = "\n\n---\n\n".join(chunk_texts) if chunk_texts else "No supporting source chunks available." 
- # Fetch dossier for structured substrate - from backend.services.learning.dossier_builder import get_dossier - - dossier = get_dossier(concept_id) dossier_section = _format_dossier_for_prompt(dossier) - # Build prompt - use profiled version if teaching profile/lens provided + # Build prompt if teaching_profile_id or teaching_lens: profile = get_profile(teaching_profile_id or "university") lens = get_lens(teaching_lens or "computer_science") @@ -302,7 +312,6 @@ async def explain_concept( settings = get_settings() start = time.time() - # Use Groq for fast explanation generation llm = get_chat_llm(temperature=0.3, max_tokens=2000) response = await ainvoke_with_observability( llm, @@ -320,8 +329,7 @@ async def explain_concept( if teaching_profile_id or teaching_lens: structured = _parse_structured_explanation(explanation_text) - # Update concept description in DB only for default/canonical explanations. - # Profiled explanations are variant renderings - not persisted as the canonical description. 
+ # Update concept description in DB only for default/canonical explanations if not (teaching_profile_id or teaching_lens): from backend.services.learning.concept_store import _get_conn @@ -332,7 +340,7 @@ async def explain_concept( ) conn.commit() - dossier_grounded = dossier is not None and dossier.atom_count > 0 + explanation_source = "dossier+retrieval" if has_chunks else "dossier_only" return ConceptExplanation( concept_id=concept_id, @@ -342,8 +350,60 @@ async def explain_concept( source_ids=list(seen_sources), chunk_count=len(docs), source_titles=source_titles, - dossier_grounded=dossier_grounded, + dossier_grounded=has_rich_dossier, chunk_previews=chunk_previews, + explanation_source=explanation_source, + ) + + +async def _llm_fallback_explanation( + concept_id: str, + concept_name: str, + teaching_profile_id: str | None, + teaching_lens: str | None, +) -> ConceptExplanation: + """Generate explanation purely from LLM knowledge when no dossier or sources exist.""" + from backend.config import get_settings + from backend.services.llm.client import ( + ainvoke_with_observability, + get_chat_llm, + record_llm_call, + ) + + logger.info("LLM fallback for concept '%s' (no dossier, no retrieval)", concept_id) + + prompt = LLM_FALLBACK_PROMPT.format(concept_name=concept_name) + + settings = get_settings() + start = time.time() + + llm = get_chat_llm(temperature=0.3, max_tokens=2000) + response = await ainvoke_with_observability( + llm, + prompt, + operation="concept_explain_fallback", + provider="groq", + model=settings.chat_model, + ) + record_llm_call(response, "groq", settings.chat_model, "concept_explain_fallback", start) + + explanation_text = response.content.strip() + + structured = None + if teaching_profile_id or teaching_lens: + structured = _parse_structured_explanation(explanation_text) + + return ConceptExplanation( + concept_id=concept_id, + concept_name=concept_name, + explanation=explanation_text, + structured=structured, + source_ids=[], + 
chunk_count=0, + source_titles={}, + dossier_grounded=False, + chunk_previews=[], + explanation_source="llm_fallback", ) diff --git a/curriculum/tracks/applied-systems.yaml b/curriculum/tracks/applied-systems.yaml index d128cea..82d1726 100644 --- a/curriculum/tracks/applied-systems.yaml +++ b/curriculum/tracks/applied-systems.yaml @@ -88,7 +88,7 @@ modules: concepts: - concept_id: latency-throughput-tradeoff sort_order: 1 - - concept_id: batch-vs-real-time-serving + - concept_id: batch-vs-realtime-inference sort_order: 2 - concept_id: model-complexity-tradeoff sort_order: 3 diff --git a/curriculum/tracks/ml-foundations.yaml b/curriculum/tracks/ml-foundations.yaml index f3d4095..7f333f5 100644 --- a/curriculum/tracks/ml-foundations.yaml +++ b/curriculum/tracks/ml-foundations.yaml @@ -21,24 +21,18 @@ modules: sort_order: 1 color: "#55cdff" concepts: - - concept_id: number - sort_order: 1 - - concept_id: fraction - sort_order: 2 - concept_id: ratio - sort_order: 3 + sort_order: 1 - concept_id: variable - sort_order: 4 + sort_order: 2 - concept_id: algebraic-expression - sort_order: 5 + sort_order: 3 - concept_id: function - sort_order: 6 + sort_order: 4 - concept_id: exponent - sort_order: 7 + sort_order: 5 - concept_id: logarithm - sort_order: 8 - - concept_id: square-root - sort_order: 9 + sort_order: 6 # ── Module 2: Describing Data ────────────────────────────────────────── - id: describing-data @@ -52,20 +46,16 @@ modules: concepts: - concept_id: mean sort_order: 1 - - concept_id: sample-mean - sort_order: 2 - - concept_id: deviation-from-mean - sort_order: 3 - concept_id: variance - sort_order: 4 + sort_order: 2 - concept_id: standard-deviation - sort_order: 5 + sort_order: 3 - concept_id: covariance - sort_order: 6 + sort_order: 4 - concept_id: correlation - sort_order: 7 + sort_order: 5 - concept_id: z-score - sort_order: 8 + sort_order: 6 # ── Module 3: Probability and Bayes ──────────────────────────────────── - id: probability-and-bayes @@ -119,10 +109,8 
@@ modules: concepts: - concept_id: feature sort_order: 1 - - concept_id: linear-model - sort_order: 2 - concept_id: linear-regression - sort_order: 3 + sort_order: 2 # ── Module 6: Probabilities for Classification ───────────────────────── - id: probabilities-for-classification @@ -153,14 +141,12 @@ modules: sort_order: 7 color: "#55cdff" concepts: - - concept_id: squared-error - sort_order: 1 - concept_id: mse-loss - sort_order: 2 + sort_order: 1 - concept_id: cross-entropy-loss - sort_order: 3 + sort_order: 2 - concept_id: gradient-descent - sort_order: 4 + sort_order: 3 # ── Module 8: Generalization and Evaluation ──────────────────────────── - id: generalization-and-evaluation @@ -174,11 +160,11 @@ modules: concepts: - concept_id: overfitting sort_order: 1 - - concept_id: cross-validation + - concept_id: bias-variance-tradeoff sort_order: 2 - - concept_id: l1-regularization + - concept_id: cross-validation sort_order: 3 - - concept_id: l2-regularization + - concept_id: regularization sort_order: 4 # ── Module 9: Features and Practical Modeling ────────────────────────── @@ -193,3 +179,7 @@ modules: concepts: - concept_id: feature-engineering sort_order: 1 + - concept_id: one-hot-encoding + sort_order: 2 + - concept_id: normalization-scaling + sort_order: 3 diff --git a/curriculum/tracks/mlops.yaml b/curriculum/tracks/mlops.yaml index b265fb9..c95a970 100644 --- a/curriculum/tracks/mlops.yaml +++ b/curriculum/tracks/mlops.yaml @@ -29,8 +29,6 @@ modules: sort_order: 3 - concept_id: data-versioning sort_order: 4 - - concept_id: etl-vs-elt - sort_order: 5 # ── Module 2: Training Systems ─────────────────────────────────────── - id: training-systems @@ -90,8 +88,6 @@ modules: sort_order: 2 - concept_id: model-performance-monitoring sort_order: 3 - - concept_id: alerting-thresholds - sort_order: 4 # ── Module 5: Lifecycle ────────────────────────────────────────────── - id: lifecycle @@ -107,9 +103,7 @@ modules: sort_order: 1 - concept_id: canary-rollout sort_order: 
2 - - concept_id: model-rollback - sort_order: 3 - concept_id: model-governance - sort_order: 4 + sort_order: 3 - concept_id: technical-debt-ml - sort_order: 5 + sort_order: 4 diff --git a/curriculum/tracks/practical-ml.yaml b/curriculum/tracks/practical-ml.yaml index 6df922c..405f844 100644 --- a/curriculum/tracks/practical-ml.yaml +++ b/curriculum/tracks/practical-ml.yaml @@ -32,44 +32,44 @@ modules: - concept_id: cold-start-problem sort_order: 5 - # ── Module 2: Ranking Systems ───────────────────────────────────────── - - id: ranking-systems - title: Ranking Systems + # ── Module 2: Retrieval Systems ─────────────────────────────────────── + - id: retrieval-systems + title: Retrieval Systems objective: > - Learn how search engines and feeds order results using pointwise, - pairwise, and listwise learning-to-rank approaches. - estimated_time_minutes: 60 + Build intuition for approximate nearest neighbor search, two-tower + models, and the retrieval-then-rank pipeline used in modern systems. + estimated_time_minutes: 45 sort_order: 2 color: "#4ade80" concepts: - - concept_id: pointwise-ranking + - concept_id: embedding sort_order: 1 - - concept_id: pairwise-ranking + - concept_id: embedding-similarity sort_order: 2 - - concept_id: listwise-ranking + - concept_id: approximate-nearest-neighbor sort_order: 3 - - concept_id: ndcg + - concept_id: two-tower-model sort_order: 4 - - concept_id: feature-store + - concept_id: retrieval-then-rank sort_order: 5 - # ── Module 3: Retrieval Systems ─────────────────────────────────────── - - id: retrieval-systems - title: Retrieval Systems + # ── Module 3: Ranking Systems ───────────────────────────────────────── + - id: ranking-systems + title: Ranking Systems objective: > - Build intuition for approximate nearest neighbor search, two-tower - models, and the retrieval-then-rank pipeline used in modern systems. 
- estimated_time_minutes: 45 + Learn how search engines and feeds order results using pointwise, + pairwise, and listwise learning-to-rank approaches. + estimated_time_minutes: 60 sort_order: 3 color: "#ffc47c" concepts: - - concept_id: embedding-similarity + - concept_id: pointwise-ranking sort_order: 1 - - concept_id: approximate-nearest-neighbor + - concept_id: pairwise-ranking sort_order: 2 - - concept_id: two-tower-model + - concept_id: listwise-ranking sort_order: 3 - - concept_id: retrieval-then-rank + - concept_id: ndcg sort_order: 4 # ── Module 4: Feedback Loops ────────────────────────────────────────── diff --git a/frontend/src/lib/api/types.ts b/frontend/src/lib/api/types.ts index 2b9e0e7..7ae6c28 100644 --- a/frontend/src/lib/api/types.ts +++ b/frontend/src/lib/api/types.ts @@ -1510,6 +1510,7 @@ export interface ConceptExplanation { source_titles: Record; dossier_grounded: boolean; chunk_previews: ChunkPreview[]; + explanation_source: "dossier+retrieval" | "dossier_only" | "llm_fallback"; } export interface PracticeItem { diff --git a/frontend/src/views/ConceptDetailView.tsx b/frontend/src/views/ConceptDetailView.tsx index c96c6eb..a2830b5 100644 --- a/frontend/src/views/ConceptDetailView.tsx +++ b/frontend/src/views/ConceptDetailView.tsx @@ -502,7 +502,7 @@ function ExplanationResult({ data }: { data: ConceptExplanation }) { const sourceTitles = data.source_titles ?? {}; const chunkPreviews = data.chunk_previews ?? []; - const isDossierOnly = data.dossier_grounded && data.chunk_count === 0; + const explSource = data.explanation_source ?? "dossier+retrieval"; return ( - {isDossierOnly ? ( + {explSource === "llm_fallback" ? (
- Based on internal concept dossier + AI-generated (no sources) +
+ ) : explSource === "dossier_only" ? ( +
+ Based on concept dossier - no source material available yet
) : ( <>
- Based on {data.chunk_count} chunk - {data.chunk_count !== 1 ? "s" : ""} from {data.source_ids.length}{" "} - source + Based on {data.source_ids.length} source {data.source_ids.length !== 1 ? "s" : ""} {data.dossier_grounded ? " + concept dossier" : ""}
From 6507c5854d108aeac7b78dcac7b00df05992e8e7 Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Wed, 18 Mar 2026 00:00:08 +0100 Subject: [PATCH 08/10] fix(tests): update curriculum test fixture schema to match production Test fixture was using old schema (sort_order column, no track_type/ category/color). Updated to match current production tables. Also updated stale YAML assertions for ml-foundations (9 modules, not 3). --- tests/test_curriculum.py | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/tests/test_curriculum.py b/tests/test_curriculum.py index 4fdc52c..b146287 100644 --- a/tests/test_curriculum.py +++ b/tests/test_curriculum.py @@ -64,8 +64,11 @@ def db(): title TEXT NOT NULL, description TEXT NOT NULL DEFAULT '', difficulty TEXT NOT NULL DEFAULT 'intermediate', - sort_order INTEGER NOT NULL DEFAULT 0, + track_type TEXT NOT NULL DEFAULT 'concept', is_published INTEGER NOT NULL DEFAULT 1, + category TEXT NOT NULL DEFAULT 'core', + category_order INTEGER NOT NULL DEFAULT 1, + track_order INTEGER NOT NULL DEFAULT 1, created_at TEXT NOT NULL DEFAULT (datetime('now')), updated_at TEXT NOT NULL DEFAULT (datetime('now')) ); @@ -77,7 +80,7 @@ def db(): objective TEXT NOT NULL DEFAULT '', estimated_time_minutes INTEGER NOT NULL DEFAULT 30, sort_order INTEGER NOT NULL DEFAULT 0, - is_published INTEGER NOT NULL DEFAULT 1, + color TEXT, created_at TEXT NOT NULL DEFAULT (datetime('now')), updated_at TEXT NOT NULL DEFAULT (datetime('now')), PRIMARY KEY (id, track_id), @@ -96,6 +99,21 @@ def db(): FOREIGN KEY (concept_id) REFERENCES concepts(id) ON DELETE CASCADE ); + CREATE TABLE track_resources ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + track_id TEXT NOT NULL, + module_id TEXT NOT NULL, + name TEXT NOT NULL, + url TEXT, + description TEXT, + detail TEXT, + resource_type TEXT NOT NULL DEFAULT 'link', + sort_order INTEGER NOT NULL DEFAULT 0, + metadata_json TEXT, + 
created_at TEXT NOT NULL DEFAULT (datetime('now')), + FOREIGN KEY (track_id) REFERENCES curriculum_tracks(id) ON DELETE CASCADE + ); + CREATE TABLE concept_progress ( user_id TEXT NOT NULL DEFAULT 'default', concept_id TEXT NOT NULL, @@ -464,12 +482,9 @@ def test_load_ml_foundations_yaml(): track = load_track_file(path) assert track.id == "ml-foundations" - assert len(track.modules) == 3 - assert track.modules[0].id == "vector-similarity" - assert len(track.modules[0].concepts) == 4 - # Normalized IDs - assert track.modules[0].concepts[0].concept_id == "dense-vectors" - assert track.modules[0].concepts[3].concept_id == "vector-embeddings" - # Attention module (narrowed from transformers) - assert track.modules[2].id == "attention" - assert len(track.modules[2].concepts) == 2 + assert len(track.modules) == 9 + assert track.modules[0].id == "numbers-formulas-functions" + assert track.modules[0].concepts[0].concept_id == "ratio" + # Module 8: generalization includes bias-variance-tradeoff + assert track.modules[7].id == "generalization-and-evaluation" + assert track.modules[7].concepts[1].concept_id == "bias-variance-tradeoff" From 58569860cea88fdf007173eff14ef8073d36a9c2 Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Wed, 18 Mar 2026 00:03:19 +0100 Subject: [PATCH 09/10] fix(lint): remove unused import and rename ambiguous variable in merge_concepts Fixes ruff F401 (unused `field` import) and E741 (ambiguous `l` variable). 
--- backend/jobs/merge_concepts.py | 54 +++++++++++++++++----------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/backend/jobs/merge_concepts.py b/backend/jobs/merge_concepts.py index 5003821..7e434fb 100644 --- a/backend/jobs/merge_concepts.py +++ b/backend/jobs/merge_concepts.py @@ -11,7 +11,7 @@ import argparse import sqlite3 import sys -from dataclasses import dataclass, field +from dataclasses import dataclass from pathlib import Path DB_PATH = Path("data/samaritan.db") @@ -49,21 +49,21 @@ class MergeStats: def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> MergeStats: """Merge loser into winner. Returns stats of what was (or would be) done.""" stats = MergeStats() - w, l = pair.winner, pair.loser + w, loser_id = pair.winner, pair.loser # Verify both exist winner_row = conn.execute("SELECT id, source_count FROM concepts WHERE id = ?", (w,)).fetchone() - loser_row = conn.execute("SELECT id, source_count FROM concepts WHERE id = ?", (l,)).fetchone() + loser_row = conn.execute("SELECT id, source_count FROM concepts WHERE id = ?", (loser_id,)).fetchone() if not winner_row: print(f" ERROR: winner {w!r} not found in concepts table") return stats if not loser_row: - print(f" WARNING: loser {l!r} not found — already merged?") + print(f" WARNING: loser {loser_id!r} not found — already merged?") return stats # 1. Absorb aliases: add loser's aliases pointing to winner (skip dupes) loser_aliases = conn.execute( - "SELECT alias FROM concept_aliases WHERE concept_id = ?", (l,) + "SELECT alias FROM concept_aliases WHERE concept_id = ?", (loser_id,) ).fetchall() winner_aliases = { row[0] @@ -78,7 +78,7 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge # Delete from loser first (PK is alias+concept_id) conn.execute( "DELETE FROM concept_aliases WHERE alias = ? 
AND concept_id = ?", - (alias, l), + (alias, loser_id), ) conn.execute( "INSERT OR IGNORE INTO concept_aliases (alias, concept_id) VALUES (?, ?)", @@ -89,26 +89,26 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge if execute: conn.execute( "DELETE FROM concept_aliases WHERE alias = ? AND concept_id = ?", - (alias, l), + (alias, loser_id), ) # 2. Move concept_atoms (concept_id) atom_count = conn.execute( - "SELECT COUNT(*) FROM concept_atoms WHERE concept_id = ?", (l,) + "SELECT COUNT(*) FROM concept_atoms WHERE concept_id = ?", (loser_id,) ).fetchone()[0] stats.atoms_moved = atom_count if execute and atom_count: - conn.execute("UPDATE concept_atoms SET concept_id = ? WHERE concept_id = ?", (w, l)) + conn.execute("UPDATE concept_atoms SET concept_id = ? WHERE concept_id = ?", (w, loser_id)) # 3. Move concept_atoms (target_concept_id) target_count = conn.execute( - "SELECT COUNT(*) FROM concept_atoms WHERE target_concept_id = ?", (l,) + "SELECT COUNT(*) FROM concept_atoms WHERE target_concept_id = ?", (loser_id,) ).fetchone()[0] stats.atom_targets_moved = target_count if execute and target_count: conn.execute( "UPDATE concept_atoms SET target_concept_id = ? WHERE target_concept_id = ?", - (w, l), + (w, loser_id), ) # 4. Handle concept_dossiers (1:1 — winner takes priority) @@ -116,7 +116,7 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge "SELECT concept_id FROM concept_dossiers WHERE concept_id = ?", (w,) ).fetchone() loser_dossier = conn.execute( - "SELECT concept_id FROM concept_dossiers WHERE concept_id = ?", (l,) + "SELECT concept_id FROM concept_dossiers WHERE concept_id = ?", (loser_id,) ).fetchone() if loser_dossier: if winner_dossier: @@ -127,17 +127,17 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge if execute: conn.execute( "UPDATE concept_dossiers SET concept_id = ? 
WHERE concept_id = ?", - (w, l), + (w, loser_id), ) stats.dossiers_deleted = 1 if winner_dossier else 0 if execute and winner_dossier: - conn.execute("DELETE FROM concept_dossiers WHERE concept_id = ?", (l,)) + conn.execute("DELETE FROM concept_dossiers WHERE concept_id = ?", (loser_id,)) # 5. Move concept_prerequisites (both directions, skip dupes) for col in ("concept_id", "prerequisite_id"): other_col = "prerequisite_id" if col == "concept_id" else "concept_id" rows = conn.execute( - f"SELECT {other_col} FROM concept_prerequisites WHERE {col} = ?", (l,) + f"SELECT {other_col} FROM concept_prerequisites WHERE {col} = ?", (loser_id,) ).fetchall() for (other_id,) in rows: # Check if winner already has this edge @@ -156,26 +156,26 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge if execute: conn.execute( f"UPDATE concept_prerequisites SET {col} = ? WHERE {col} = ? AND {other_col} = ?", - (w, l, other_id), + (w, loser_id, other_id), ) else: if execute: conn.execute( f"DELETE FROM concept_prerequisites WHERE {col} = ? AND {other_col} = ?", - (l, other_id), + (loser_id, other_id), ) # 6. Move practice_items practice_count = conn.execute( - "SELECT COUNT(*) FROM practice_items WHERE concept_id = ?", (l,) + "SELECT COUNT(*) FROM practice_items WHERE concept_id = ?", (loser_id,) ).fetchone()[0] stats.practice_moved = practice_count if execute and practice_count: - conn.execute("UPDATE practice_items SET concept_id = ? WHERE concept_id = ?", (w, l)) + conn.execute("UPDATE practice_items SET concept_id = ? WHERE concept_id = ?", (w, loser_id)) # 7. 
Move concept_progress (skip if winner already has entry for same user) progress_rows = conn.execute( - "SELECT user_id FROM concept_progress WHERE concept_id = ?", (l,) + "SELECT user_id FROM concept_progress WHERE concept_id = ?", (loser_id,) ).fetchall() for (user_id,) in progress_rows: exists = conn.execute( @@ -187,19 +187,19 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge if execute: conn.execute( "UPDATE concept_progress SET concept_id = ? WHERE user_id = ? AND concept_id = ?", - (w, user_id, l), + (w, user_id, loser_id), ) else: if execute: conn.execute( "DELETE FROM concept_progress WHERE user_id = ? AND concept_id = ?", - (user_id, l), + (user_id, loser_id), ) # 8. Move curriculum_module_concepts (skip dupes) curriculum_rows = conn.execute( "SELECT module_id, track_id FROM curriculum_module_concepts WHERE concept_id = ?", - (l,), + (loser_id,), ).fetchall() for module_id, track_id in curriculum_rows: exists = conn.execute( @@ -211,13 +211,13 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge if execute: conn.execute( "UPDATE curriculum_module_concepts SET concept_id = ? WHERE module_id = ? AND track_id = ? AND concept_id = ?", - (w, module_id, track_id, l), + (w, module_id, track_id, loser_id), ) else: if execute: conn.execute( "DELETE FROM curriculum_module_concepts WHERE module_id = ? AND track_id = ? AND concept_id = ?", - (module_id, track_id, l), + (module_id, track_id, loser_id), ) # 9. Update winner source_count (additive from loser, avoid double-count) @@ -232,8 +232,8 @@ def merge_one(conn: sqlite3.Connection, pair: MergePair, execute: bool) -> Merge # 10. 
Delete remaining loser aliases (cleanup) then loser concept if execute: - conn.execute("DELETE FROM concept_aliases WHERE concept_id = ?", (l,)) - conn.execute("DELETE FROM concepts WHERE id = ?", (l,)) + conn.execute("DELETE FROM concept_aliases WHERE concept_id = ?", (loser_id,)) + conn.execute("DELETE FROM concepts WHERE id = ?", (loser_id,)) return stats From 1e88312a004a47e3d4480cb9a055121436414046 Mon Sep 17 00:00:00 2001 From: scaleborg <218523607+scaleborg@users.noreply.github.com> Date: Wed, 18 Mar 2026 10:33:20 +0100 Subject: [PATCH 10/10] chore: trigger CI with updated Groq API key