import React, { useState, useEffect, useCallback } from 'react';
import { initializeApp } from 'firebase/app';
import { getFirestore, collection, addDoc, query, orderBy, onSnapshot, updateDoc, doc, deleteDoc, serverTimestamp } from 'firebase/firestore';
import { getAuth, signInAnonymously, onAuthStateChanged, User } from 'firebase/auth';
import { Trash2, Play, Plus, Save, RotateCcw, Settings, ArrowUp, ArrowDown, X, Cast, LayoutTemplate } from 'lucide-react';

// --- CONFIGURATION ---
// `__firebase_config` / `__app_id` are injected into the page by the host
// environment at runtime. Declared here so strict TS compiles, and guarded
// with `typeof` below so a missing global falls back cleanly instead of
// throwing a ReferenceError (which the bare `__firebase_config || '{}'` did).
declare const __firebase_config: string | undefined;
declare const __app_id: string | undefined;

const firebaseConfig = JSON.parse(
  (typeof __firebase_config !== 'undefined' && __firebase_config) || '{}'
);
const app = initializeApp(firebaseConfig);
const auth = getAuth(app);
const db = getFirestore(app);
const appId = (typeof __app_id !== 'undefined' && __app_id) || 'default-shaambavi';

// --- TYPES ---
type StepType = 'speak' | 'wait' | 'breath' | 'affirmation';

// One atomic action in a flow, as produced by the AI and edited by the user.
interface MicroStep {
  id: string;          // client-generated random id, not a Firestore doc id
  type: StepType;
  text: string;
  isCompleted: boolean;
}

// A saved routine. Persisted to Firestore (see serverTimestamp import).
interface Flow {
  id: string;
  title: string;
  originalInput: string;   // the raw text the user typed before AI breakdown
  steps: MicroStep[];
  createdAt: any;          // Firestore serverTimestamp sentinel/Timestamp — kept `any` to match writers
  deletedAt: any | null;   // null = active; set on soft-delete
}

// --- GROQ API HELPER (THE BRAIN) ---
/**
 * Turns a free-form task description into an ordered list of MicroSteps by
 * calling the Groq chat-completions endpoint with an executive-function
 * system prompt.
 *
 * @param inputText  The user's raw task description (e.g. "Clean kitchen").
 * @param apiKey     Groq API key; an empty key throws immediately.
 * @returns          Parsed steps with fresh client-side ids, all marked incomplete.
 * @throws Error on missing key, HTTP failure, or an empty/invalid AI response.
 */
const generateFlowFromGroq = async (inputText: string, apiKey: string): Promise<MicroStep[]> => {
  if (!apiKey) throw new Error("Please enter a Groq API Key in Settings.");

  // Neurodivergent-support logic + specific tone. NOTE: OUTPUT FORMAT asks for
  // a {"steps": [...]} object because `response_format: json_object` (below)
  // requires the model to emit a JSON *object*, not a bare array; the parser
  // still tolerates both shapes.
  const systemPrompt = `
You are "My Own Shaambavi", an Executive Function Prosthetic for a neurodivergent user (ADHD/Autism).

YOUR CORE LOGIC:
1. DECONSTRUCT: The user sees "Clean Kitchen". You see "Stand up", "Walk to kitchen", "Find sponge", "Wet sponge". Break everything down into micro-steps.
2. HIDDEN STEPS: Identify the invisible barriers (transitioning, fetching items). Always put "fetching" before "using".
3. SEQUENCE: Group by location to minimize switching costs.

YOUR TONE & PERSONA (CRITICAL):
- Grounded, warm, not sugary. Not "parental".
- Subordinate but supportive. Phrases: "I'm here", "Proceeding", "Noted", "Ready when you are".
- For acknowledgments, use: "Okay", "Understood", "Yes ma'am", "Got it", "Right away".
- For Meds: "Sip of water" -> "Tablet in hand" -> "Tablet in mouth" -> "Sip to finish".

OUTPUT FORMAT: Strict JSON object with a single key "steps" whose value is an array of objects. Keys per object: "type" (speak|wait|breath|affirmation), "text".

TYPES:
- "speak": An instruction. Short. Clear.
- "wait": A pause for user action (use this often after physical steps). Text should be a cue like "Tap when done" or "Tap to proceed".
- "breath": A grounding moment. Text: "Deep inhale... and a soft exhale."
- "affirmation": Grounding statements like "You are moving through life with intention", "You showed up for yourself", "You're in motion".
`;

  try {
    const response = await fetch("https://api.groq.com/openai/v1/chat/completions", {
      method: "POST",
      headers: { "Authorization": `Bearer ${apiKey}`, "Content-Type": "application/json" },
      body: JSON.stringify({
        messages: [
          { role: "system", content: systemPrompt },
          { role: "user", content: inputText }
        ],
        model: "llama-3.3-70b-versatile", // Llama 3 70B for high intelligence/logic
        temperature: 0.4,                 // lower temperature for reliable sequencing
        response_format: { type: "json_object" }
      })
    });

    if (!response.ok) {
      const errData = await response.json();
      throw new Error(errData.error?.message || `API Error: ${response.status}`);
    }

    const data = await response.json();
    const content = data.choices[0]?.message?.content;
    if (!content) throw new Error("No content received from AI.");

    const json = JSON.parse(content);
    // json_object mode yields an object; accept a bare array or common wrappers.
    const stepsArray = Array.isArray(json) ? json : (json.steps || json.actions || []);

    return stepsArray.map((s: any) => ({
      id: Math.random().toString(36).slice(2, 11), // slice(): substr() is deprecated
      type: s.type || 'speak',
      text: s.text ?? '',                          // model may omit "text"; never store undefined
      isCompleted: false
    }));
  } catch (error) {
    console.error("Groq Error:", error);
    throw error;
  }
};

// --- COMPONENTS ---

// 1. SETTINGS
const SettingsModal = ({ apiKey, setApiKey, onClose }: { apiKey: string, setApiKey: (k: string) => void, onClose: () => void }) => (

Settings

setApiKey(e.target.value)} placeholder="gsk_..." className="w-full bg-slate-800 border border-slate-700 text-white rounded p-3 focus:outline-none focus:border-red-500 transition-colors font-sans" />

Required for Llama 3 70B logic.

); // 2. FLOW CREATOR const FlowCreator = ({ apiKey, onSave, onCancel }: { apiKey: string, onSave: (title: string, input: string, steps: MicroStep[]) => void, onCancel: () => void }) => { const [input, setInput] = useState(""); const [title, setTitle] = useState(""); const [loading, setLoading] = useState(false); const [generatedSteps, setGeneratedSteps] = useState([]); const [stepMode, setStepMode] = useState(false); const handleMagic = async () => { if (!input.trim()) return; setLoading(true); try { const steps = await generateFlowFromGroq(input, apiKey); setGeneratedSteps(steps); setStepMode(true); if (!title) setTitle(input.split('\n')[0].substring(0, 25)); } catch (e: any) { alert(`Error: ${e.message}`); } finally { setLoading(false); } }; const moveStep = (index: number, direction: -1 | 1) => { const newSteps = [...generatedSteps]; if (index + direction < 0 || index + direction >= newSteps.length) return; [newSteps[index], newSteps[index + direction]] = [newSteps[index + direction], newSteps[index]]; setGeneratedSteps(newSteps); }; const deleteStep = (index: number) => { const newSteps = generatedSteps.filter((_, i) => i !== index); setGeneratedSteps(newSteps); }; if (stepMode) { return (
setTitle(e.target.value)} className="bg-transparent text-2xl font-bold italic text-white focus:outline-none w-full" placeholder="Name this Flow..." />
{generatedSteps.map((step, idx) => (
{idx + 1}
{step.type}
{ const newSteps = [...generatedSteps]; newSteps[idx].text = e.target.value; setGeneratedSteps(newSteps); }} className="bg-transparent text-slate-200 w-full focus:outline-none font-serif text-lg" />
))}
); } return (

New Flow

setTitle(e.target.value)} placeholder="Title (Optional)" className="bg-slate-900 border border-slate-800 rounded p-4 text-white text-xl italic focus:border-red-500 focus:outline-none placeholder-slate-600" />