From ec71ccc6b2547f57c0f322fbc9e67adc298a8e49 Mon Sep 17 00:00:00 2001 From: Sarthak Karandikar Date: Mon, 12 May 2025 15:19:25 +0530 Subject: [PATCH 1/7] chore: update readme --- README.md | 339 +++++++++++++++++++++--------------------------------- 1 file changed, 131 insertions(+), 208 deletions(-) diff --git a/README.md b/README.md index 76d5a22c..ffc96185 100644 --- a/README.md +++ b/README.md @@ -49,10 +49,8 @@ - [Features](#dart-features) - [Roadmap](#compass-roadmap) - [Getting Started](#toolbox-getting-started) - - [Prerequisites](#bangbang-prerequisites-contributors) - - [Installation](#gear-installation-users) - - [Environment Variables](#-environment-variables-contributors) - - [Run Locally](#running-run-locally-contributors) + - [For Contributors (default branch)](#for-contributors-default-branch) + - [For Self-Hosting (self-host branch)](#for-self-hosting-self-host-branch) - [Usage](#eyes-usage) - [Contributing](#wave-contributing) - [Code of Conduct](#scroll-code-of-conduct) @@ -161,193 +159,145 @@ We at [Existence](https://existence.technology) believe that AI won't simply die ## :toolbox: Getting Started - +Choose your path below. +- **Contributors**: Follow along in the **default** branch. +- **Self-Hosters**: Switch to the **self-host** branch to get the self-hostable version with identical instructions. -### :gear: Installation (Users) +--- -If you're not interested in contributing to the project or self-hosting and simply want to use Sentient, join the [Early Adopters Group](https://chat.whatsapp.com/IOHxuf2W8cKEuyZrMo8DOJ). You can also join our paid waitlist for $3 - to do this, contact [@itsskofficial](https://github.com/itsskofficial). Users on the paid waitlist will be the first to get access to the full cloud version of Sentient via a closed beta. 
+### For Contributors (default branch) -If you are interested in contributing to the app or simply running the current latest version from source, you can proceed with the following steps 👇 +If you're here to **contribute** to Sentient—adding features, fixing bugs, improving docs—follow these steps on the **default** branch. - +1. **Clone the repo** + ```bash + git clone https://github.com/existence-master/Sentient.git + cd Sentient + ``` -### :bangbang: Prerequisites (Contributors) +2. **Prerequisites** -#### The following instructions are for Linux-based machines, but they remain fundamentally the same for Windows & Mac. Only things like venv configs and activations change on Windows, the rest of the process is pretty much the same. + * **Node.js & npm** + Install from [nodejs.org](https://nodejs.org/en/download). + * **Python 3.11+** + Install from [python.org](https://www.python.org/downloads/). + * **Ollama** (for text models) + Install from [ollama.com](https://ollama.com/). + * **Neo4j Community Edition** (for the knowledge graph) + Download from [neo4j.com](https://neo4j.com/deployment-center/). -Clone the project +3. **Frontend setup** -```bash - git clone https://github.com/existence-master/Sentient.git -``` + ```bash + cd src/client + npm install + ``` -Go to the project directory +4. **Backend setup** -```bash - cd Sentient -``` + ```bash + cd src/server + python3 -m venv venv + source venv/bin/activate + pip install -r requirements.txt + ``` -Install the following to start contributing to Sentient: + > ⚠️ If you encounter numpy errors, first install with the latest numpy (2.x), then downgrade to 1.26.4. -- npm: The ElectronJS frontend of the Sentient desktop app uses npm as its package manager. +5. **Ollama models** - Install the latest version of NodeJS and npm from [here.](https://nodejs.org/en/download) - - After that, install all the required packages. 
- - ```bash - cd ./src/client && npm install - ``` - -- python: Python will be needed to run the backend. - Install Python [from here.](https://www.python.org/downloads/) We recommend Python 3.11. - - After that, you will need to create a virtual environment and install all required packages. This venv will need to be activated whenever you want to run the Python server (backend). - - ```bash - cd src/server && python3 -m venv venv - cd venv/bin && source activate - cd ../../ && pip install -r requirements.txt - ``` - - `⚠️ If you get a numpy dependency error while installing the requirements, first install the requirements with the latest numpy version (2.x). After the installation of requirements completes, install a numpy 1.x version (backend has been tested and works successfully on numpy 1.26.4) and you will be ready to go. This is probably not the best practise, but this works for now.` - - `⚠️ If you intend to use Advanced Voice Mode, you MUST download and install llama-cpp-python with CUDA support (if you have an NVIDIA GPU) using the commented out pip command in the requirements.txt file. Otherwise, simply download and install the llama-cpp-python package with pip for simple CPU-only support. This line is commented out in the requirements file to allow users to download and install the appropriate version based on their preference (CPU only/GPU accelerated).` - -- Ollama: Download and install the latest version of Ollama [from here.](https://ollama.com/) - - After that, pull the model you wish to use from Ollama. For example, - - ```bash + ```bash ollama pull llama3.2:3b - ``` - - `⚠️ By default, the backend is configured with Llama 3.2 3B. We found this SLM to be really versatile and works really well for our usage, as compared to other SLMs. However a lot of new SLMs like Cogito are being dropped everyday so we will probably be changing the model soon. 
If you wish to use a different model, simply find all the places where llama3.2:3b has been set in the Python backend scripts and change it to the tag of the model you have pulled from Ollama.` - -- Neo4j Community: Download Neo4j Community Edition [from here.](https://neo4j.com/deployment-center/) - - Next, you will need to enable the APOC plugin. - After extracting Neo4j Community Edition, navigate to the labs folder. Copy the `apoc-x.x.x-core.jar` script to the plugins folder in the Neo4j folder. - Edit the neo4j.conf file to allow the use of APOC procedures: - - ```bash - sudo nano /etc/neo4j/neo4j.conf - ``` - - Uncomment or add the following lines: - - ```ini - dbms.security.procedures.unrestricted=apoc.* - dbms.security.procedures.allowlist=apoc.* - dbms.unmanaged_extension_classes=apoc.export=/apoc - ``` - - You can run Neo4j community using the following commands + ``` - ```bash - cd neo4j/bin && ./neo4j console - ``` + *Tip*: To use another model, update `BASE_MODEL_REPO_ID` in your `.env` accordingly. - While Neo4j is running, you can visit `http://localhost:7474/` to run Cypher Queries and interact with your knowledge graph. +6. **Neo4j APOC plugin** - `⚠️ On your first run of Neo4j Community, you will need to set a username and password. **Remember this password** as you will need to add it to the .env file on the Python backend.` + * Copy `apoc-x.x.x-core.jar` into `neo4j/plugins`. + * In `neo4j/conf/neo4j.conf`: -- Download the Voice Model (Orpheus TTS 3B) + ```ini + dbms.security.procedures.unrestricted=apoc.* + dbms.security.procedures.allowlist=apoc.* + dbms.unmanaged_extension_classes=apoc.export=/apoc + ``` - For using Advanced Voice Mode, you need to manually download [this model](https://huggingface.co/isaiahbjork/orpheus-3b-0.1-ft-Q4_K_M-GGUF) from Huggingface. Whisper is automatically downloaded by Sentient via fasterwhisper. +7. **Environment variables** - The model linked above is a Q4 quantization of the Orpheus 3B model. 
If you have even more VRAM at your disposal, you can go for the [Q8 quant](https://huggingface.co/Mungert/orpheus-3b-0.1-ft-GGUF). + * **Frontend**: create `src/interface/.env`: - Download the GGUF files - these models are run using llama-cpp-python. + ```env + ELECTRON_APP_URL="http://localhost:3000" + APP_SERVER_URL="http://127.0.0.1:5000" + NEO4J_SERVER_URL="http://localhost:7474" + BASE_MODEL_REPO_ID="llama3.2:3b" + AUTH0_DOMAIN="your-auth0-domain" + AUTH0_CLIENT_ID="your-auth0-client-id" + ``` + * **Backend**: create `src/server/.env`: - Place the model files here: - `src/server/voice/models` + ```env + NEO4J_URI=bolt://localhost:7687 + NEO4J_USERNAME=neo4j + NEO4J_PASSWORD=your-password + EMBEDDING_MODEL_REPO_ID=sentence-transformers/all-MiniLM-L6-v2 + BASE_MODEL_URL=http://localhost:11434/api/chat + BASE_MODEL_REPO_ID=llama3.2:3b + GOOGLE_CLIENT_ID=… + GOOGLE_CLIENT_SECRET=… + BRAVE_SUBSCRIPTION_TOKEN=… + AES_SECRET_KEY=… + AES_IV=… + ``` - and ensure that the correct model name is set in the Python scripts on the backend. By default, the app is configured to use the 8-bit quant using the same name that it has when you download it from HuggingFace. + > ⚠️ If you need example keys, see the discussion “Request Environment Variables” in Issues. - `⚠️ If you do not have enough VRAM and voice mode is not that important to you, you can comment out/remove the voice mode loading functionality in the main app.py located at src/server/app/app.py` +8. **Run everything** - + * Start Neo4j: -### 🔒: Environment Variables (Contributors) + ```bash + cd neo4j/bin && ./neo4j console + ``` + * Start backend: -You will need the following environment variables to run the project locally. 
For sensitive keys like Auth0, GCP, Brave Search you can create your own accounts and populate your own keys or comment in the discussion titled ['Request Environment Variables (.env) Here'](https://github.com/existence-master/Sentient/discussions/13) if you want pre-setup keys + ```bash + cd src/server + source venv/bin/activate + python -m server.app.app + ``` + * Start Electron client: -For the Electron Frontend, you will need to create a `.env` file in the `src/interface` folder. Populate that `.env` file with the following variables (examples given). + ```bash + cd src/client + npm run dev + ``` -```.emv.template - ELECTRON_APP_URL= "http://localhost:3000" - APP_SERVER_URL= "http://127.0.0.1:5000" - APP_SERVER_LOADED= "false" - APP_SERVER_INITIATED= "false" - NEO4J_SERVER_URL= "http://localhost:7474" - NEO4J_SERVER_STARTED= "false" - BASE_MODEL_REPO_ID= "llama3.2:3b" - AUTH0_DOMAIN = "abcdxyz.us.auth0.com" - AUTH0_CLIENT_ID = "abcd1234" -``` +--- -For the Python Backend, you will need to create a `.env` file and place it in the `src/model` folder. Populate that `.env` file with the following variables (examples given). 
+### For Self-Hosting (self-host branch) -```.emv.template - NEO4J_URI=bolt://localhost:7687 - NEO4J_USERNAME=neo4j - NEO4J_PASSWORD=abcd1234 - EMBEDDING_MODEL_REPO_ID=sentence-transformers/all-MiniLM-L6-v2 - BASE_MODEL_URL=http://localhost:11434/api/chat - BASE_MODEL_REPO_ID=llama3.2:3b - LINKEDIN_USERNAME=email@address.com - LINKEDIN_PASSWORD=password123 - BRAVE_SUBSCRIPTION_TOKEN=YOUR_TOKEN_HERE - BRAVE_BASE_URL=https://api.search.brave.com/res/v1/web/search - GOOGLE_CLIENT_ID=YOUR_GOOGLE_CLIENT_ID_HERE - GOOGLE_PROJECT_ID=YOUR_PROJECT_ID - GOOGLE_AUTH_URI=https://accounts.google.com/o/oauth2/auth - GOOGLE_TOKEN_URI=https://oauth2.googleapis.com/token - GOOGLE_AUTH_PROVIDER_CERT_URL=https://www.googleapis.com/oauth2/v1/certs - GOOGLE_CLIENT_SECRET=YOUR_SECRET_HERE - GOOGLE_REDIRECT_URIS=http://localhost - AES_SECRET_KEY=YOUR_SECRET_KEY_HERE (256 bits or 32 chars) - AES_IV=YOUR_IV_HERE (256 bits or 32 chars) - AUTH0_DOMAIN=abcdxyz.us.auth0.com - AUTH0_MANAGEMENT_CLIENT_ID=YOUR_MANAGEMENT_CLIENT_ID - AUTH0_MANAGEMENT_CLIENT_SECRET=YOUR_MANAGEMENT_CLIENT_SECRET -``` +If you just want to **self-host** Sentient—no contributions needed—switch to the `self-host` branch. The instructions are **identical** to the contributors guide above, but this branch is tailored for self-hosting deployments. -`⚠️ If you face some issues with Auth0 setup, please contact us via our Whatsapp Group or reach out to one of the lead contributors [@Kabeer2004](https://github.com/Kabeer2004), [@itsskofficial](https://github.com/itsskofficial) or [@abhijeetsuryawanshi12](https://github.com/abhijeetsuryawanshi12)` +1. **Switch branches** - + ```bash + git clone https://github.com/existence-master/Sentient.git + cd Sentient + git checkout self-host + ``` -### :running: Run Locally (Contributors) +2. 
**Follow all steps** in the **Contributors** section above, starting at “Prerequisites.” -**Install dependencies** + * Install dependencies + * Pull Ollama models + * Configure Neo4j & environment files + * Run Neo4j, backend, and client -Ensure that you have installed all the dependencies as outlined in the [Prerequisites Section](#bangbang-prerequisites). +> **Tip**: The `self-host` branch will always mirror the default branch’s setup instructions. Any updates to installation or configuration in the default branch will be back-ported here for self-hosting users. -**Start Neo4j** - -Start Neo4j Community Edition first. - -```bash -cd neo4j/bin && ./neo4j console -``` - -**Start the Python backend server.** - -```bash - cd src/server/venv/bin/ && source activate - cd ../../ && python -m server.app.app -``` - -Once the Python server has fully started up, start the Electron client. - -```bash - cd src/interface && npm run dev -``` - -`❗ You are free to package and bundle your own versions of the app that may or may not contain any modifications. However, if you do make any modifications, you must comply with the AGPL license and open-source your version as well.` - - +--- ## :eyes: Usage @@ -357,11 +307,11 @@ Sentient is a proactive companion that pulls context from the different apps you Sentient can also do a lot based on simple user commands. 
-- `"Hey Sentient, help me find a restaurant in Pune based on my food preferences.` -- `What are the upcoming events in my Google Calendar?` -- `Setup a lunch meeting with tom@email.com and add it to my Calendar` -- `Create a pitch deck for my startup in Google Slides and email it to tom@email.com` -- `Help me find new hobbies in my city` +* `"Hey Sentient, help me find a restaurant in Pune based on my food preferences.` +* `What are the upcoming events in my Google Calendar?` +* `Setup a lunch meeting with tom@email.com and add it to my Calendar` +* `Create a pitch deck for my startup in Google Slides and email it to tom@email.com` +* `Help me find new hobbies in my city` 📹 [Check out our ad!](https://www.youtube.com/watch?v=Oeqmg25yqDY) @@ -387,98 +337,71 @@ Please read the [code of conduct](https://github.com/existence-master/Sentient/b ## :grey_question: FAQ -- When will the cloud version launch? +* **When will the cloud version launch?** + We are working as fast as we can to bring it to life! Join our [WhatsApp Community](https://chat.whatsapp.com/IOHxuf2W8cKEuyZrMo8DOJ) to get daily updates and more! - - We are working as fast as we can to bring it to life! Join our [WhatsApp Community](https://chat.whatsapp.com/IOHxuf2W8cKEuyZrMo8DOJ) to get daily updates and more! +* **What data do you collect about me?** + For auth, we have a standard email–password flow provided by Auth0 (also supports Google OAuth). We only collect your email and login history. Read more in our [privacy policy](https://existence-sentient.vercel.app/privacy). -- What data do you collect about me? +* **What hardware do I need?** - - For auth, we have a standard email-password flow provided by Auth0 (also supports Google OAuth). So, the only data we collect is the email provided by users and their login history. This helps us understand how users are using the app, retention rates, daily signups and more. 
Read more about data collection in our [privacy policy.](https://existence-sentient.vercel.app/privacy). + * **Text mode**: CPU (Intel i5 or equivalent), 8 GB RAM, GPU with 4–6 GB VRAM. + * **Voice mode**: Additional VRAM depending on your Orpheus 3B quant. -- What kind of hardware do I need to run the app locally/self-host it? - - - To run Sentient - any decent CPU (Intel Core i5 or equivalent and above), 8GB of RAM and a GPU with 4-6GB of VRAM should be enough for text only. For voice, additional VRAM will be required based on the quant you choose for Orpheus 3B. A GPU is necessary for fast local model inference. You can self-host/run locally on Windows, Linux or Mac. - -- Why open source? - - - Since the app is going to be processing a lot of your personal information, maintaining transparency of the underlying code and processes is very important. The code needs to be available for everyone to freely view how their data is being managed in the app. We also want developers to be able to contribute to Sentient - they should be able to add missing integrations or features that they feel should be a part of Sentient. They should also be able to freely make their own forks of Sentient for different use-cases, provided they abide by the GNU AGPL license and open-source their work. - -- Why AGPL? - - - We intentionally decided to go with a more restrictive license, specifically AGPL, rather than a permissive license (like MIT or Apache) since we do not want any other closed-source, cloud-based competitors cropping up with our code at its core. Going with AGPL is our way of staying committed to our core principles of transparency and privacy while ensuring that others who use our code also follow the same principles. +* **Why open source & AGPL?** + Transparency around personal data is core to our philosophy. AGPL ensures derivatives also remain open, preventing closed-source forks. ## :warning: License -Distributed under the GNU AGPL License. 
Check [our lisence](https://github.com/existence-master/Sentient/blob/master/LICENSE.txt) for more information. +Distributed under the GNU AGPL License. See [LICENSE.txt](https://github.com/existence-master/Sentient/blob/master/LICENSE.txt) for details. ## :handshake: Contact -[existence.sentient@gmail.com](existence.sentient@gmail.com) +[existence.sentient@gmail.com](mailto:existence.sentient@gmail.com) ## :gem: Acknowledgements -Sentient wouldn't have been possible without +Sentient wouldn't have been possible without: -- [Ollama](https://ollama.com/) -- [Neo4j](https://neo4j.com/) -- [FastAPI](https://fastapi.tiangolo.com/) -- [Meta's Llama Models](https://www.llama.com/) -- [ElectronJS](https://www.electronjs.org/) -- [Next.js](https://nextjs.org/) +* [Ollama](https://ollama.com/) +* [Neo4j](https://neo4j.com/) +* [FastAPI](https://fastapi.tiangolo.com/) +* [Meta's Llama Models](https://www.llama.com/) +* [ElectronJS](https://www.electronjs.org/) +* [Next.js](https://nextjs.org/) ## :heavy_check_mark: Official Team -The official team behind Sentient - - - - - -

- - - itsskofficial - - + itsskofficial
-

- - - kabeer2004 - - + kabeer2004
-
+
- - - abhijeetsuryawanshi12 - - + abhijeetsuryawanshi12
-
From b0fb822b4bf8842f781752580f19276ae75720ce Mon Sep 17 00:00:00 2001 From: Sarthak Karandikar Date: Mon, 12 May 2025 15:19:25 +0530 Subject: [PATCH 2/7] chore: update readme --- README.md | 339 +++++++++++++++++++++--------------------------------- 1 file changed, 131 insertions(+), 208 deletions(-) diff --git a/README.md b/README.md index 76d5a22c..ffc96185 100644 --- a/README.md +++ b/README.md @@ -49,10 +49,8 @@ - [Features](#dart-features) - [Roadmap](#compass-roadmap) - [Getting Started](#toolbox-getting-started) - - [Prerequisites](#bangbang-prerequisites-contributors) - - [Installation](#gear-installation-users) - - [Environment Variables](#-environment-variables-contributors) - - [Run Locally](#running-run-locally-contributors) + - [For Contributors (default branch)](#for-contributors-default-branch) + - [For Self-Hosting (self-host branch)](#for-self-hosting-self-host-branch) - [Usage](#eyes-usage) - [Contributing](#wave-contributing) - [Code of Conduct](#scroll-code-of-conduct) @@ -161,193 +159,145 @@ We at [Existence](https://existence.technology) believe that AI won't simply die ## :toolbox: Getting Started - +Choose your path below. +- **Contributors**: Follow along in the **default** branch. +- **Self-Hosters**: Switch to the **self-host** branch to get the self-hostable version with identical instructions. -### :gear: Installation (Users) +--- -If you're not interested in contributing to the project or self-hosting and simply want to use Sentient, join the [Early Adopters Group](https://chat.whatsapp.com/IOHxuf2W8cKEuyZrMo8DOJ). You can also join our paid waitlist for $3 - to do this, contact [@itsskofficial](https://github.com/itsskofficial). Users on the paid waitlist will be the first to get access to the full cloud version of Sentient via a closed beta. 
+### For Contributors (default branch) -If you are interested in contributing to the app or simply running the current latest version from source, you can proceed with the following steps 👇 +If you're here to **contribute** to Sentient—adding features, fixing bugs, improving docs—follow these steps on the **default** branch. - +1. **Clone the repo** + ```bash + git clone https://github.com/existence-master/Sentient.git + cd Sentient + ``` -### :bangbang: Prerequisites (Contributors) +2. **Prerequisites** -#### The following instructions are for Linux-based machines, but they remain fundamentally the same for Windows & Mac. Only things like venv configs and activations change on Windows, the rest of the process is pretty much the same. + * **Node.js & npm** + Install from [nodejs.org](https://nodejs.org/en/download). + * **Python 3.11+** + Install from [python.org](https://www.python.org/downloads/). + * **Ollama** (for text models) + Install from [ollama.com](https://ollama.com/). + * **Neo4j Community Edition** (for the knowledge graph) + Download from [neo4j.com](https://neo4j.com/deployment-center/). -Clone the project +3. **Frontend setup** -```bash - git clone https://github.com/existence-master/Sentient.git -``` + ```bash + cd src/client + npm install + ``` -Go to the project directory +4. **Backend setup** -```bash - cd Sentient -``` + ```bash + cd src/server + python3 -m venv venv + source venv/bin/activate + pip install -r requirements.txt + ``` -Install the following to start contributing to Sentient: + > ⚠️ If you encounter numpy errors, first install with the latest numpy (2.x), then downgrade to 1.26.4. -- npm: The ElectronJS frontend of the Sentient desktop app uses npm as its package manager. +5. **Ollama models** - Install the latest version of NodeJS and npm from [here.](https://nodejs.org/en/download) - - After that, install all the required packages. 
- - ```bash - cd ./src/client && npm install - ``` - -- python: Python will be needed to run the backend. - Install Python [from here.](https://www.python.org/downloads/) We recommend Python 3.11. - - After that, you will need to create a virtual environment and install all required packages. This venv will need to be activated whenever you want to run the Python server (backend). - - ```bash - cd src/server && python3 -m venv venv - cd venv/bin && source activate - cd ../../ && pip install -r requirements.txt - ``` - - `⚠️ If you get a numpy dependency error while installing the requirements, first install the requirements with the latest numpy version (2.x). After the installation of requirements completes, install a numpy 1.x version (backend has been tested and works successfully on numpy 1.26.4) and you will be ready to go. This is probably not the best practise, but this works for now.` - - `⚠️ If you intend to use Advanced Voice Mode, you MUST download and install llama-cpp-python with CUDA support (if you have an NVIDIA GPU) using the commented out pip command in the requirements.txt file. Otherwise, simply download and install the llama-cpp-python package with pip for simple CPU-only support. This line is commented out in the requirements file to allow users to download and install the appropriate version based on their preference (CPU only/GPU accelerated).` - -- Ollama: Download and install the latest version of Ollama [from here.](https://ollama.com/) - - After that, pull the model you wish to use from Ollama. For example, - - ```bash + ```bash ollama pull llama3.2:3b - ``` - - `⚠️ By default, the backend is configured with Llama 3.2 3B. We found this SLM to be really versatile and works really well for our usage, as compared to other SLMs. However a lot of new SLMs like Cogito are being dropped everyday so we will probably be changing the model soon. 
If you wish to use a different model, simply find all the places where llama3.2:3b has been set in the Python backend scripts and change it to the tag of the model you have pulled from Ollama.` - -- Neo4j Community: Download Neo4j Community Edition [from here.](https://neo4j.com/deployment-center/) - - Next, you will need to enable the APOC plugin. - After extracting Neo4j Community Edition, navigate to the labs folder. Copy the `apoc-x.x.x-core.jar` script to the plugins folder in the Neo4j folder. - Edit the neo4j.conf file to allow the use of APOC procedures: - - ```bash - sudo nano /etc/neo4j/neo4j.conf - ``` - - Uncomment or add the following lines: - - ```ini - dbms.security.procedures.unrestricted=apoc.* - dbms.security.procedures.allowlist=apoc.* - dbms.unmanaged_extension_classes=apoc.export=/apoc - ``` - - You can run Neo4j community using the following commands + ``` - ```bash - cd neo4j/bin && ./neo4j console - ``` + *Tip*: To use another model, update `BASE_MODEL_REPO_ID` in your `.env` accordingly. - While Neo4j is running, you can visit `http://localhost:7474/` to run Cypher Queries and interact with your knowledge graph. +6. **Neo4j APOC plugin** - `⚠️ On your first run of Neo4j Community, you will need to set a username and password. **Remember this password** as you will need to add it to the .env file on the Python backend.` + * Copy `apoc-x.x.x-core.jar` into `neo4j/plugins`. + * In `neo4j/conf/neo4j.conf`: -- Download the Voice Model (Orpheus TTS 3B) + ```ini + dbms.security.procedures.unrestricted=apoc.* + dbms.security.procedures.allowlist=apoc.* + dbms.unmanaged_extension_classes=apoc.export=/apoc + ``` - For using Advanced Voice Mode, you need to manually download [this model](https://huggingface.co/isaiahbjork/orpheus-3b-0.1-ft-Q4_K_M-GGUF) from Huggingface. Whisper is automatically downloaded by Sentient via fasterwhisper. +7. **Environment variables** - The model linked above is a Q4 quantization of the Orpheus 3B model. 
If you have even more VRAM at your disposal, you can go for the [Q8 quant](https://huggingface.co/Mungert/orpheus-3b-0.1-ft-GGUF). + * **Frontend**: create `src/interface/.env`: - Download the GGUF files - these models are run using llama-cpp-python. + ```env + ELECTRON_APP_URL="http://localhost:3000" + APP_SERVER_URL="http://127.0.0.1:5000" + NEO4J_SERVER_URL="http://localhost:7474" + BASE_MODEL_REPO_ID="llama3.2:3b" + AUTH0_DOMAIN="your-auth0-domain" + AUTH0_CLIENT_ID="your-auth0-client-id" + ``` + * **Backend**: create `src/server/.env`: - Place the model files here: - `src/server/voice/models` + ```env + NEO4J_URI=bolt://localhost:7687 + NEO4J_USERNAME=neo4j + NEO4J_PASSWORD=your-password + EMBEDDING_MODEL_REPO_ID=sentence-transformers/all-MiniLM-L6-v2 + BASE_MODEL_URL=http://localhost:11434/api/chat + BASE_MODEL_REPO_ID=llama3.2:3b + GOOGLE_CLIENT_ID=… + GOOGLE_CLIENT_SECRET=… + BRAVE_SUBSCRIPTION_TOKEN=… + AES_SECRET_KEY=… + AES_IV=… + ``` - and ensure that the correct model name is set in the Python scripts on the backend. By default, the app is configured to use the 8-bit quant using the same name that it has when you download it from HuggingFace. + > ⚠️ If you need example keys, see the discussion “Request Environment Variables” in Issues. - `⚠️ If you do not have enough VRAM and voice mode is not that important to you, you can comment out/remove the voice mode loading functionality in the main app.py located at src/server/app/app.py` +8. **Run everything** - + * Start Neo4j: -### 🔒: Environment Variables (Contributors) + ```bash + cd neo4j/bin && ./neo4j console + ``` + * Start backend: -You will need the following environment variables to run the project locally. 
For sensitive keys like Auth0, GCP, Brave Search you can create your own accounts and populate your own keys or comment in the discussion titled ['Request Environment Variables (.env) Here'](https://github.com/existence-master/Sentient/discussions/13) if you want pre-setup keys + ```bash + cd src/server + source venv/bin/activate + python -m server.app.app + ``` + * Start Electron client: -For the Electron Frontend, you will need to create a `.env` file in the `src/interface` folder. Populate that `.env` file with the following variables (examples given). + ```bash + cd src/client + npm run dev + ``` -```.emv.template - ELECTRON_APP_URL= "http://localhost:3000" - APP_SERVER_URL= "http://127.0.0.1:5000" - APP_SERVER_LOADED= "false" - APP_SERVER_INITIATED= "false" - NEO4J_SERVER_URL= "http://localhost:7474" - NEO4J_SERVER_STARTED= "false" - BASE_MODEL_REPO_ID= "llama3.2:3b" - AUTH0_DOMAIN = "abcdxyz.us.auth0.com" - AUTH0_CLIENT_ID = "abcd1234" -``` +--- -For the Python Backend, you will need to create a `.env` file and place it in the `src/model` folder. Populate that `.env` file with the following variables (examples given). 
+### For Self-Hosting (self-host branch) -```.emv.template - NEO4J_URI=bolt://localhost:7687 - NEO4J_USERNAME=neo4j - NEO4J_PASSWORD=abcd1234 - EMBEDDING_MODEL_REPO_ID=sentence-transformers/all-MiniLM-L6-v2 - BASE_MODEL_URL=http://localhost:11434/api/chat - BASE_MODEL_REPO_ID=llama3.2:3b - LINKEDIN_USERNAME=email@address.com - LINKEDIN_PASSWORD=password123 - BRAVE_SUBSCRIPTION_TOKEN=YOUR_TOKEN_HERE - BRAVE_BASE_URL=https://api.search.brave.com/res/v1/web/search - GOOGLE_CLIENT_ID=YOUR_GOOGLE_CLIENT_ID_HERE - GOOGLE_PROJECT_ID=YOUR_PROJECT_ID - GOOGLE_AUTH_URI=https://accounts.google.com/o/oauth2/auth - GOOGLE_TOKEN_URI=https://oauth2.googleapis.com/token - GOOGLE_AUTH_PROVIDER_CERT_URL=https://www.googleapis.com/oauth2/v1/certs - GOOGLE_CLIENT_SECRET=YOUR_SECRET_HERE - GOOGLE_REDIRECT_URIS=http://localhost - AES_SECRET_KEY=YOUR_SECRET_KEY_HERE (256 bits or 32 chars) - AES_IV=YOUR_IV_HERE (256 bits or 32 chars) - AUTH0_DOMAIN=abcdxyz.us.auth0.com - AUTH0_MANAGEMENT_CLIENT_ID=YOUR_MANAGEMENT_CLIENT_ID - AUTH0_MANAGEMENT_CLIENT_SECRET=YOUR_MANAGEMENT_CLIENT_SECRET -``` +If you just want to **self-host** Sentient—no contributions needed—switch to the `self-host` branch. The instructions are **identical** to the contributors guide above, but this branch is tailored for self-hosting deployments. -`⚠️ If you face some issues with Auth0 setup, please contact us via our Whatsapp Group or reach out to one of the lead contributors [@Kabeer2004](https://github.com/Kabeer2004), [@itsskofficial](https://github.com/itsskofficial) or [@abhijeetsuryawanshi12](https://github.com/abhijeetsuryawanshi12)` +1. **Switch branches** - + ```bash + git clone https://github.com/existence-master/Sentient.git + cd Sentient + git checkout self-host + ``` -### :running: Run Locally (Contributors) +2. 
**Follow all steps** in the **Contributors** section above, starting at “Prerequisites.” -**Install dependencies** + * Install dependencies + * Pull Ollama models + * Configure Neo4j & environment files + * Run Neo4j, backend, and client -Ensure that you have installed all the dependencies as outlined in the [Prerequisites Section](#bangbang-prerequisites). +> **Tip**: The `self-host` branch will always mirror the default branch’s setup instructions. Any updates to installation or configuration in the default branch will be back-ported here for self-hosting users. -**Start Neo4j** - -Start Neo4j Community Edition first. - -```bash -cd neo4j/bin && ./neo4j console -``` - -**Start the Python backend server.** - -```bash - cd src/server/venv/bin/ && source activate - cd ../../ && python -m server.app.app -``` - -Once the Python server has fully started up, start the Electron client. - -```bash - cd src/interface && npm run dev -``` - -`❗ You are free to package and bundle your own versions of the app that may or may not contain any modifications. However, if you do make any modifications, you must comply with the AGPL license and open-source your version as well.` - - +--- ## :eyes: Usage @@ -357,11 +307,11 @@ Sentient is a proactive companion that pulls context from the different apps you Sentient can also do a lot based on simple user commands. 
-- `"Hey Sentient, help me find a restaurant in Pune based on my food preferences.` -- `What are the upcoming events in my Google Calendar?` -- `Setup a lunch meeting with tom@email.com and add it to my Calendar` -- `Create a pitch deck for my startup in Google Slides and email it to tom@email.com` -- `Help me find new hobbies in my city` +* `"Hey Sentient, help me find a restaurant in Pune based on my food preferences.` +* `What are the upcoming events in my Google Calendar?` +* `Setup a lunch meeting with tom@email.com and add it to my Calendar` +* `Create a pitch deck for my startup in Google Slides and email it to tom@email.com` +* `Help me find new hobbies in my city` 📹 [Check out our ad!](https://www.youtube.com/watch?v=Oeqmg25yqDY) @@ -387,98 +337,71 @@ Please read the [code of conduct](https://github.com/existence-master/Sentient/b ## :grey_question: FAQ -- When will the cloud version launch? +* **When will the cloud version launch?** + We are working as fast as we can to bring it to life! Join our [WhatsApp Community](https://chat.whatsapp.com/IOHxuf2W8cKEuyZrMo8DOJ) to get daily updates and more! - - We are working as fast as we can to bring it to life! Join our [WhatsApp Community](https://chat.whatsapp.com/IOHxuf2W8cKEuyZrMo8DOJ) to get daily updates and more! +* **What data do you collect about me?** + For auth, we have a standard email–password flow provided by Auth0 (also supports Google OAuth). We only collect your email and login history. Read more in our [privacy policy](https://existence-sentient.vercel.app/privacy). -- What data do you collect about me? +* **What hardware do I need?** - - For auth, we have a standard email-password flow provided by Auth0 (also supports Google OAuth). So, the only data we collect is the email provided by users and their login history. This helps us understand how users are using the app, retention rates, daily signups and more. 
Read more about data collection in our [privacy policy.](https://existence-sentient.vercel.app/privacy). + * **Text mode**: CPU (Intel i5 or equivalent), 8 GB RAM, GPU with 4–6 GB VRAM. + * **Voice mode**: Additional VRAM depending on your Orpheus 3B quant. -- What kind of hardware do I need to run the app locally/self-host it? - - - To run Sentient - any decent CPU (Intel Core i5 or equivalent and above), 8GB of RAM and a GPU with 4-6GB of VRAM should be enough for text only. For voice, additional VRAM will be required based on the quant you choose for Orpheus 3B. A GPU is necessary for fast local model inference. You can self-host/run locally on Windows, Linux or Mac. - -- Why open source? - - - Since the app is going to be processing a lot of your personal information, maintaining transparency of the underlying code and processes is very important. The code needs to be available for everyone to freely view how their data is being managed in the app. We also want developers to be able to contribute to Sentient - they should be able to add missing integrations or features that they feel should be a part of Sentient. They should also be able to freely make their own forks of Sentient for different use-cases, provided they abide by the GNU AGPL license and open-source their work. - -- Why AGPL? - - - We intentionally decided to go with a more restrictive license, specifically AGPL, rather than a permissive license (like MIT or Apache) since we do not want any other closed-source, cloud-based competitors cropping up with our code at its core. Going with AGPL is our way of staying committed to our core principles of transparency and privacy while ensuring that others who use our code also follow the same principles. +* **Why open source & AGPL?** + Transparency around personal data is core to our philosophy. AGPL ensures derivatives also remain open, preventing closed-source forks. ## :warning: License -Distributed under the GNU AGPL License. 
Check [our lisence](https://github.com/existence-master/Sentient/blob/master/LICENSE.txt) for more information. +Distributed under the GNU AGPL License. See [LICENSE.txt](https://github.com/existence-master/Sentient/blob/master/LICENSE.txt) for details. ## :handshake: Contact -[existence.sentient@gmail.com](existence.sentient@gmail.com) +[existence.sentient@gmail.com](mailto:existence.sentient@gmail.com) ## :gem: Acknowledgements -Sentient wouldn't have been possible without +Sentient wouldn't have been possible without: -- [Ollama](https://ollama.com/) -- [Neo4j](https://neo4j.com/) -- [FastAPI](https://fastapi.tiangolo.com/) -- [Meta's Llama Models](https://www.llama.com/) -- [ElectronJS](https://www.electronjs.org/) -- [Next.js](https://nextjs.org/) +* [Ollama](https://ollama.com/) +* [Neo4j](https://neo4j.com/) +* [FastAPI](https://fastapi.tiangolo.com/) +* [Meta's Llama Models](https://www.llama.com/) +* [ElectronJS](https://www.electronjs.org/) +* [Next.js](https://nextjs.org/) ## :heavy_check_mark: Official Team -The official team behind Sentient - - - - - -

- - - itsskofficial - - + itsskofficial
-

- - - kabeer2004 - - + kabeer2004
-
+
- - - abhijeetsuryawanshi12 - - + abhijeetsuryawanshi12
-
From 5a237783242c5322b7c346cac47366a214ccfd80 Mon Sep 17 00:00:00 2001 From: Sarthak Karandikar Date: Fri, 16 May 2025 18:55:03 +0530 Subject: [PATCH 3/7] chore: update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ffc96185..55cc160f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ![README Banner](./.github/assets/banner.png) -

Your personal, private & interactive AI companion

+

Your proactive AI companion

From 79c6304f7cc9e7ee1b4dc27afa746bc8a6923d48 Mon Sep 17 00:00:00 2001 From: Sarthak Karandikar Date: Fri, 16 May 2025 18:55:03 +0530 Subject: [PATCH 4/7] chore: update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ffc96185..55cc160f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ![README Banner](./.github/assets/banner.png) -

Your personal, private & interactive AI companion

+

Your proactive AI companion

From c78226678cc3f1214fc4e8bad9716a45b0052b31 Mon Sep 17 00:00:00 2001 From: Sarthak Karandikar Date: Mon, 28 Jul 2025 22:58:35 +0530 Subject: [PATCH 5/7] Update README.md --- README.md | 106 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index d03998d7..e6e910d8 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ![README Banner](./.github/assets/banner.png) -

Proactive Intelligence Across Your Apps

+

Sentient: Your Personal AI Assistant

@@ -38,82 +38,89 @@
-> Sentient is an open-source AI project aimed at bridging the gap between input context and output actions performed by agents. AI Agents heavily rely on input prompts to perform actions. We wish to _eliminate prompting entirely_ making the first big step towards truly autonomous AI that is aligned with a user's goals and can get stuff done without needing to context-switch between multiple apps and typing long prompts. +> Hey there! I'm Sarthak, and I'm building **Sentient**: a personal AI assistant for anyone and everyone who wants to live their life more productively. +> +> Sentient acts as your central command center, bridging the gap between your goals and the actions required to achieve them. It is designed to be a truly proactive partner that understands you, manages your digital life, and gets things done—without you having to type long, complex prompts. +> +> It can: +> - **💬 Chat with you** about any topic via text or voice. +> - **🧠 Learn your preferences, habits, and goals** to better serve you over time. +> - **⚙️ Execute complex, multi-step tasks** and recurring workflows. +> - **🗓️ Proactively manage your day**, reading your emails and calendar to suggest schedules and remind you of important events. +> - **🔗 Integrate seamlessly** with the apps you use every day. +> +> And the best part? **The project is fully open-source.** > > [Read our manifesto.](https://docs.google.com/document/d/1vbCGAbh9f8vXfPup_Z7cW__gnOLdRhEtHKyoIxJD8is/edit?tab=t.0#heading=h.2kit9yqvlc77) --- -## ✨ Current Features - -![image](https://github.com/user-attachments/assets/756c8aeb-1748-445c-a09a-df6d99aeee58) -

-

The Home Page

+

💬 Join our WhatsApp Community! 💬

+

Interested in trying Sentient out? Join our community to get the latest updates, ask questions, and connect with the team.

-![Journl](https://github.com/user-attachments/assets/467fd26d-18a4-4107-98a9-05fa83b26a77) +--- -
-

The Journal page is the central page of the app - use it to track your day and Sentient gets stuff done.

-
+## ✨ Features -![image](https://github.com/user-attachments/assets/fcf05b39-7f8d-46f8-9702-e0791f27f918) +Sentient is a powerful, web-based platform designed for seamless interaction, automation, and intelligence. + +image
-

Sentient co-authors the journal with you.

+

The Home page is your central chat interface to talk with your AI assistant.

-![image](https://github.com/user-attachments/assets/319d7c35-9046-4ea1-a369-88ab1a9ded8e) +image
-

Use the Tasks page to create and manage workflows.

+

The Tasks page gives you a unified view of all your tasks, where the AI assists in execution.

-Sentient has evolved into a powerful web-based platform with a robust set of features designed for deep integration and automation: - -### 🧠 Proactive Context & Learning +image -Sentient automatically collects information from connected applications like **Gmail** and **Google Calendar**. It extracts relevant context, identifying important facts to remember and also creates plans to tackle action items - without needing to be prompted. - -### 📝 Memory System +
+

The Integrations page is where you connect all your apps.

+
-- **SuperMemory:** Permanent facts about you—your preferences, relationships, and key details—are stored and managed through an integration with **Supermemory**, creating a rich, personalized knowledge base that the agent can update and retrieve from anytime. -- **Notes & Journal:** A full-featured journal allows you to simply write down what's on your mind and have Sentient manage it for you. Sentient can also write to this journal, giving you updates on what it's doing and more. The journal also helps you keep track of scheduled and recurring tasks created by Sentient. Any information obtained from your context sources is also populated in the journal. +image -### 🤖 Autonomous Task & Agent System +
+

The Settings page is where you can customize the application.

+
-- **Generate Plans from Goals:** Sentient can generate detailed plans to execute tasks using connected tools, all from a simple high-level goal. -- **Asynchronous Execution:** Once approved, tasks are handled **asynchronously** in the background - you can approve as many tasks as you want simultaneously. The executor agent intelligently uses the available tools to complete the plan, providing real-time progress updates. -- **View & Manage Tasks:** A dedicated **Tasks page** lets you view active, pending, and completed tasks, check their progress, and see the final results. -### 🔌 Extensive Integrations (MCP Hub) +### 💬 Unified Chat Interface +The home page is a universal chat screen where you can talk with Sentient about anything. Use **text or voice** to ask questions, give commands, or simply have a conversation. The chat is also supercharged with tools like Internet Search, Weather, News, and Shopping for any specific queries. -Our **Model Context Protocol (MCP)** hub allows for a powerful, distributed system of tools. Current integrations include: +### 🤖 Autonomous Task Management +The **Tasks page** is your mission control center. Here you can add, view, and manage all your to-dos. +- **AI-Assisted Execution:** Describe a high-level goal, and Sentient will generate a detailed, step-by-step plan to achieve it using its integrated tools. +- **Asynchronous Workflows:** Approve a plan, and Sentient gets to work in the background, handling complex, multi-step workflows without interrupting you. You can monitor progress in real-time. +- **Unified View:** Track active, pending, and completed tasks all in one place. +### 🔌 Seamless Integrations +The **Integrations page** is where you connect Sentient to your digital life. Our **Model Context Protocol (MCP)** hub allows for a powerful, distributed system of tools. Current integrations include: - **Google Suite:** Gmail, Google Calendar, Google Drive, Google Docs, Google Sheets, and Google Slides. 
- **Productivity:** Slack and Notion. - **Developer:** GitHub. - **Information:** Internet Search (Google Search), News (NewsAPI), Weather (AccuWeather), Google Shopping and Google Maps. - **Miscellaneous:** QuickChart for generating charts on the fly. -More tools will be added soon. +### 🧠 Proactive Intelligence & Learning +Sentient doesn't just wait for commands. It proactively scans connected apps like **Gmail** and **Google Calendar** to understand your schedule and priorities. +- **Contextual Awareness:** It identifies action items, suggests tasks, and learns important facts about you. +- **Personalized Memory:** Key details about your preferences, relationships, and goals are stored via an integration with **Supermemory**, creating a rich, personalized knowledge base that helps the agent serve you better over time. -### 💬 Interactive Chat Overlay - -A chat interface is available on any page. It allows you to have conversations with Sentient and also use tools like Internet Search, Weather, News and Shopping for any specific queries. - -### ⚙️ Full Customization & Settings - -A central settings page gives you complete control: - -- Connect or disconnect applications with OAuth (for applications supporting OAuth) or manually. -- Set custom privacy filters to prevent Sentient from processing context containing sensitive information. -- Configure WhatsApp notifications to stay updated on the go. +### ⚙️ Full Customization +The **Settings page** gives you complete control over your agent. +- **Manage Connections:** Easily connect or disconnect your apps. +- **Privacy Filters:** Set custom filters to prevent Sentient from processing context containing sensitive information. +- **Notifications:** Configure WhatsApp notifications to stay updated on the go. ### 🔒 Self-Hostable - -The entire platform can be self-hosted and configured to run fully locally. 
[Check the relevant docs for more info.](https://sentient-2.gitbook.io/docs/getting-started/running-sentient-from-source-self-host) +The entire platform is open-source and can be self-hosted and configured to run fully locally, ensuring your data stays private. [Check the relevant docs for more info.](https://sentient-2.gitbook.io/docs/getting-started/running-sentient-from-source-self-host) --- @@ -121,12 +128,11 @@ The entire platform can be self-hosted and configured to run fully locally. [Che We are constantly working to expand Sentient's capabilities. Here is a glimpse of what's planned for the future: -- **Make the Web App as feature-rich as possible:** There is a lot that can be improved in the existing webapp. -- **OS-Level Integration:** Launch native apps for `Windows`, `MacOS`, `Android` and `iOS` that allow for deeper integrations. -- **Expanded Integrations:** Add support for more popular services, such as the `Microsoft 365 Suite`, `Spotify`, and so on. -- **Advanced Reasoning & Planning:** Reasoning improvements for the planning and execution pipeline. -- **Tool-Specific UI:** Enhance the interface with custom UI components for specific tool outputs, such as maps for location-based results. -- **Custom Tool Integrations:** Let users add any app of their choice. +- **OS-Level Integration:** Launch native apps for `Windows`, `MacOS`, `Android` and `iOS` for deeper, more proactive assistance. +- **Expanded Integrations:** Add support for more popular services, such as the `Microsoft 365 Suite`, `Spotify`, and more. +- **Advanced Conversational AI:** Enhance the chat experience with more natural voice interactions, better memory, and more sophisticated reasoning. +- **Richer Task Execution:** Improve the planning and execution pipeline and provide richer visual feedback for tasks. +- **Custom Tool Integrations:** Create a framework that allows users to easily add any app of their choice. 
## :wave: Contributing @@ -166,7 +172,7 @@ Distributed under the GNU AGPL License. See [LICENSE.txt](https://github.com/exi
- itsskofficial + itsskofficial (Sarthak)
From 441c4875ace8b5f1133dfc4e555ba9ac74e1bdfc Mon Sep 17 00:00:00 2001 From: Sarthak Karandikar Date: Mon, 28 Jul 2025 23:00:53 +0530 Subject: [PATCH 6/7] Update README.md --- README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index e6e910d8..e76666a2 100644 --- a/README.md +++ b/README.md @@ -38,9 +38,9 @@
-> Hey there! I'm Sarthak, and I'm building **Sentient**: a personal AI assistant for anyone and everyone who wants to live their life more productively. +> **Sentient** is a personal AI assistant for anyone and everyone who wants to live their life more productively. > -> Sentient acts as your central command center, bridging the gap between your goals and the actions required to achieve them. It is designed to be a truly proactive partner that understands you, manages your digital life, and gets things done—without you having to type long, complex prompts. +> It acts as your central command center, bridging the gap between your goals and the actions required to achieve them. It is designed to be a truly proactive partner that understands you, manages your digital life, and gets things done—without you having to type long, complex prompts. > > It can: > - **💬 Chat with you** about any topic via text or voice. @@ -49,9 +49,7 @@ > - **🗓️ Proactively manage your day**, reading your emails and calendar to suggest schedules and remind you of important events. > - **🔗 Integrate seamlessly** with the apps you use every day. > -> And the best part? 
**The project is fully open-source.** -> -> [Read our manifesto.](https://docs.google.com/document/d/1vbCGAbh9f8vXfPup_Z7cW__gnOLdRhEtHKyoIxJD8is/edit?tab=t.0#heading=h.2kit9yqvlc77) +> For more information [read our manifesto.](https://docs.google.com/document/d/1vbCGAbh9f8vXfPup_Z7cW__gnOLdRhEtHKyoIxJD8is/edit?tab=t.0#heading=h.2kit9yqvlc77) --- From 8bf846ede6ce5b18f401290accfff1a0c72a69e4 Mon Sep 17 00:00:00 2001 From: IanOS-AI Date: Fri, 13 Feb 2026 19:54:05 -0500 Subject: [PATCH 7/7] chore: add repo baseline (mise, CLAUDE, smoke) --- CLAUDE.md | 16 ++++++++++++++++ mise.toml | 9 +++++++++ package.json | 8 +++++--- scripts/smoke.sh | 8 ++++++++ src/client/package.json | 3 ++- 5 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 CLAUDE.md create mode 100644 mise.toml create mode 100755 scripts/smoke.sh diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..6c1039d9 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,16 @@ +# CLAUDE.md + +## Setup +mise install +mise run install + +## Run +npm run dev + +## Smoke +mise run smoke + +## Rules +- Touch one service area at a time (client or server). +- Keep docker/selfhost files valid. +- Run smoke before commit. diff --git a/mise.toml b/mise.toml new file mode 100644 index 00000000..a5ffe824 --- /dev/null +++ b/mise.toml @@ -0,0 +1,9 @@ +[tools] +node = "24.13.1" +python = "3.14" + +[tasks.install] +run = "npm install --no-package-lock && (cd src/client && npm install)" + +[tasks.smoke] +run = "bash scripts/smoke.sh" diff --git a/package.json b/package.json index c65d685e..d91fec93 100644 --- a/package.json +++ b/package.json @@ -11,7 +11,8 @@ "start": "next start", "lint": "next lint", "format": "prettier --write .", - "format:check": "prettier --check ." 
+ "format:check": "prettier --check .", + "smoke": "bash scripts/smoke.sh" }, "dependencies": { "next": "^14.2.0", @@ -28,5 +29,6 @@ "postcss": "^8.0.0", "prettier": "^3.2.5", "tailwindcss": "^3.0.0" - } -} \ No newline at end of file + }, + "packageManager": "npm@11.10.0" +} diff --git a/scripts/smoke.sh b/scripts/smoke.sh new file mode 100755 index 00000000..0904262e --- /dev/null +++ b/scripts/smoke.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail +cd "$(dirname "$0")/.." +npm install --no-package-lock +if [ -f src/client/package.json ]; then + (cd src/client && npm install && npm run build --if-present) +fi +echo "sentient smoke passed" diff --git a/src/client/package.json b/src/client/package.json index 683b6926..818dc7ef 100644 --- a/src/client/package.json +++ b/src/client/package.json @@ -87,5 +87,6 @@ "prettier": "^3.2.5", "tailwindcss": "^4.0.7", "typescript": "^5.5.0" - } + }, + "packageManager": "npm@11.10.0" }