From 848c26ed4139eac3d24fc3bb7a98b2baf5d5e34a Mon Sep 17 00:00:00 2001
From: Prins Kumar
Date: Tue, 10 Mar 2026 21:19:07 +0530
Subject: [PATCH 1/3] feat: Configuration pages, role-based access to pages
---
common/config.py | 302 +++-
common/embeddings/embedding_services.py | 5 +-
.../llm_services/google_vertexai_service.py | 5 +-
common/requirements.txt | 1 +
docs/tutorials/configs/nginx.conf | 10 +
ecc/app/common | 1 -
ecc/app/configs | 1 -
ecc/app/ecc_util.py | 43 +-
ecc/app/graphrag/community_summarizer.py | 45 +-
ecc/app/graphrag/workers.py | 2 +-
ecc/app/main.py | 47 +
graphrag-ui/src/components/Bot.tsx | 1 +
graphrag-ui/src/components/ModeToggle.tsx | 82 +-
graphrag-ui/src/main.tsx | 46 +-
.../src/pages/setup/CustomizePrompts.tsx | 298 ++++
graphrag-ui/src/pages/setup/GraphDBConfig.tsx | 437 ++++++
.../src/pages/setup/GraphRAGConfig.tsx | 408 +++++
graphrag-ui/src/pages/setup/IngestGraph.tsx | 1392 +++++++++++++++++
graphrag-ui/src/pages/setup/KGAdmin.tsx | 595 +++++++
graphrag-ui/src/pages/setup/LLMConfig.tsx | 1344 ++++++++++++++++
graphrag-ui/src/pages/setup/SetupLayout.tsx | 260 +++
graphrag/app/agent/agent.py | 45 +-
graphrag/app/common | 1 -
graphrag/app/configs | 1 -
graphrag/app/routers/ui.py | 901 ++++++++++-
25 files changed, 6164 insertions(+), 109 deletions(-)
delete mode 120000 ecc/app/common
delete mode 120000 ecc/app/configs
create mode 100644 graphrag-ui/src/pages/setup/CustomizePrompts.tsx
create mode 100644 graphrag-ui/src/pages/setup/GraphDBConfig.tsx
create mode 100644 graphrag-ui/src/pages/setup/GraphRAGConfig.tsx
create mode 100644 graphrag-ui/src/pages/setup/IngestGraph.tsx
create mode 100644 graphrag-ui/src/pages/setup/KGAdmin.tsx
create mode 100644 graphrag-ui/src/pages/setup/LLMConfig.tsx
create mode 100644 graphrag-ui/src/pages/setup/SetupLayout.tsx
delete mode 120000 graphrag/app/common
delete mode 120000 graphrag/app/configs
diff --git a/common/config.py b/common/config.py
index 18a4288..a611594 100644
--- a/common/config.py
+++ b/common/config.py
@@ -13,9 +13,12 @@
# limitations under the License.
import json
+import logging
import os
from fastapi.security import HTTPBasic
+
+logger = logging.getLogger(__name__)
from pyTigerGraph import TigerGraphConnection
from common.embeddings.embedding_services import (
@@ -51,6 +54,34 @@
# Configs
SERVER_CONFIG = os.getenv("SERVER_CONFIG", "configs/server_config.json")
+
+
+def get_server_config_path(graphname=None):
+ """Return graph-specific server config path if it exists, else the default."""
+ if graphname:
+ graph_path = f"configs/{graphname}/server_config.json"
+ if os.path.exists(graph_path):
+ return graph_path
+ return SERVER_CONFIG
+
+
+def get_completion_config(graphname=None):
+ """
+ Return completion_service config for the given graph.
+ Uses configs/{graphname}/server_config.json if it exists, else falls back to default.
+ Auth credentials always come from the live default config so key rotations propagate.
+ """
+ config_path = get_server_config_path(graphname)
+ if config_path != SERVER_CONFIG:
+ logger.info(f"[get_completion_config] graph={graphname} using graph-specific config: {config_path}")
+ with open(config_path, "r") as f:
+ graph_config = json.load(f)
+ graph_completion = graph_config.get("llm_config", {}).get("completion_service", {}).copy()
+ if "authentication_configuration" in llm_config:
+ graph_completion["authentication_configuration"] = llm_config["authentication_configuration"]
+ return graph_completion
+ logger.info(f"[get_completion_config] graph={graphname} using default config")
+ return llm_config["completion_service"].copy()
PATH_PREFIX = os.getenv("PATH_PREFIX", "")
PRODUCTION = os.getenv("PRODUCTION", "false").lower() == "true"
@@ -83,13 +114,25 @@
if llm_config is None:
raise Exception("llm_config is not found in SERVER_CONFIG")
+# Inject authentication_configuration into service configs so they have everything they need
+if "authentication_configuration" in llm_config:
+ if "completion_service" in llm_config:
+ llm_config["completion_service"]["authentication_configuration"] = llm_config["authentication_configuration"]
+ if "embedding_service" in llm_config:
+ llm_config["embedding_service"]["authentication_configuration"] = llm_config["authentication_configuration"]
+ if "multimodal_service" in llm_config:
+ llm_config["multimodal_service"]["authentication_configuration"] = llm_config["authentication_configuration"]
+
completion_config = llm_config.get("completion_service")
if completion_config is None:
raise Exception("completion_service is not found in llm_config")
-if "llm_service" not in completion_config:
- raise Exception("llm_service is not found in completion_service")
-if "llm_model" not in completion_config:
- raise Exception("llm_model is not found in completion_service")
+
+# Log which model will be used for chatbot and ECC/GraphRAG
+if "chatbot_llm" in completion_config:
+ logger.info(f"[CHATBOT] Using chatbot_llm: {completion_config['chatbot_llm']} (Provider: {completion_config['llm_service']})")
+ logger.info(f"[ECC/GraphRAG] Using llm_model: {completion_config['llm_model']} (Provider: {completion_config['llm_service']})")
+else:
+ logger.info(f"[CHATBOT & ECC/GraphRAG] Using llm_model: {completion_config['llm_model']} (Provider: {completion_config['llm_service']})")
embedding_config = llm_config.get("embedding_service")
if embedding_config is None:
raise Exception("embedding_service is not found in llm_config")
@@ -99,6 +142,9 @@
raise Exception("model_name is not found in embedding_service")
embedding_dimension = embedding_config.get("dimensions", 1536)
+# Log which embedding model will be used
+logger.info(f"[EMBEDDING] Using model: {embedding_config.get('model_name', 'N/A')} (Provider: {embedding_config.get('embedding_model_service', 'N/A')})")
+
# Get context window size from llm_config
# <=0 means unlimited tokens (no truncation), otherwise use the specified limit
if "token_limit" in llm_config:
@@ -109,6 +155,8 @@
# Get multimodal_service config (optional, for vision/image tasks)
multimodal_config = llm_config.get("multimodal_service")
+if multimodal_config:
+ logger.info(f"[MULTIMODAL] Using model: {multimodal_config.get('llm_model', 'N/A')} (Provider: {multimodal_config.get('llm_service', 'N/A')})")
# Merge shared authentication configuration from llm_config level into service configs
# Services can still override by defining their own authentication_configuration
@@ -175,27 +223,44 @@
else:
raise Exception("Embedding service not implemented")
-def get_llm_service(llm_config) -> LLM_Model:
- if llm_config["completion_service"]["llm_service"].lower() == "openai":
- return OpenAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "azure":
- return AzureOpenAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "sagemaker":
- return AWS_SageMaker_Endpoint(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "vertexai":
- return GoogleVertexAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "genai":
- return GoogleGenAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "bedrock":
- return AWSBedrock(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "groq":
- return Groq(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "ollama":
- return Ollama(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "huggingface":
- return HuggingFaceEndpoint(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "watsonx":
- return IBMWatsonX(llm_config["completion_service"])
+def get_llm_service(llm_config, for_chatbot=False) -> LLM_Model:
+ """
+ Get LLM service for either Chatbot or GraphRAG/ECC tasks.
+
+ Args:
+ llm_config: The LLM configuration dictionary
+ for_chatbot: If True, uses chatbot_llm if specified, otherwise uses llm_model.
+ If False (default), always uses llm_model for ECC/GraphRAG.
+ """
+ # Use completion_service which already has authentication_configuration injected
+ service_config = llm_config["completion_service"].copy()
+
+ # For chatbot: use chatbot_llm if specified, otherwise use llm_model
+ # For ECC/GraphRAG: always use llm_model
+ if for_chatbot and "chatbot_llm" in service_config:
+ service_config["llm_model"] = service_config["chatbot_llm"]
+ # If llm_model doesn't exist, it will raise KeyError in the service constructor
+
+ if service_config["llm_service"].lower() == "openai":
+ return OpenAI(service_config)
+ elif service_config["llm_service"].lower() == "azure":
+ return AzureOpenAI(service_config)
+ elif service_config["llm_service"].lower() == "sagemaker":
+ return AWS_SageMaker_Endpoint(service_config)
+ elif service_config["llm_service"].lower() == "vertexai":
+ return GoogleVertexAI(service_config)
+ elif service_config["llm_service"].lower() == "genai":
+ return GoogleGenAI(service_config)
+ elif service_config["llm_service"].lower() == "bedrock":
+ return AWSBedrock(service_config)
+ elif service_config["llm_service"].lower() == "groq":
+ return Groq(service_config)
+ elif service_config["llm_service"].lower() == "ollama":
+ return Ollama(service_config)
+ elif service_config["llm_service"].lower() == "huggingface":
+ return HuggingFaceEndpoint(service_config)
+ elif service_config["llm_service"].lower() == "watsonx":
+ return IBMWatsonX(service_config)
else:
raise Exception("LLM Completion Service Not Supported")
@@ -269,3 +334,190 @@ def get_multimodal_service() -> LLM_Model:
support_ai_instance=True,
)
service_status["embedding_store"] = {"status": "ok", "error": None}
+
+
+def reload_llm_config(new_llm_config: dict = None):
+ """
+ Reload LLM configuration and reinitialize services.
+
+ Args:
+ new_llm_config: If provided, saves this config to file first.
+ If None, just reloads from existing file.
+
+ Returns:
+ dict: Status of reload operation
+ """
+ global llm_config, embedding_service, completion_config, embedding_config, multimodal_config
+
+ try:
+ # If new config provided, save it first
+ if new_llm_config is not None:
+ with open(SERVER_CONFIG, "r") as f:
+ server_config = json.load(f)
+
+            # NOTE(review): existing_llm_config below is never used — the new config replaces llm_config wholesale; remove it or implement key preservation
+ existing_llm_config = server_config.get("llm_config", {})
+
+ # Directly save the new LLM config without preserving old API keys
+ server_config["llm_config"] = new_llm_config
+
+ with open(SERVER_CONFIG, "w") as f:
+ json.dump(server_config, f, indent=2)
+
+ # Read/reload from file
+ with open(SERVER_CONFIG, "r") as f:
+ server_config = json.load(f)
+
+ # Validate before updating
+ new_llm_config = server_config.get("llm_config")
+ if new_llm_config is None:
+ raise Exception("llm_config is not found in SERVER_CONFIG")
+
+ # Inject authentication_configuration into service configs BEFORE updating globals
+ if "authentication_configuration" in new_llm_config:
+ if "completion_service" in new_llm_config:
+ new_llm_config["completion_service"]["authentication_configuration"] = new_llm_config["authentication_configuration"]
+ if "embedding_service" in new_llm_config:
+ new_llm_config["embedding_service"]["authentication_configuration"] = new_llm_config["authentication_configuration"]
+ if "multimodal_service" in new_llm_config:
+ new_llm_config["multimodal_service"]["authentication_configuration"] = new_llm_config["authentication_configuration"]
+
+ new_completion_config = new_llm_config.get("completion_service")
+ new_embedding_config = new_llm_config.get("embedding_service")
+ new_multimodal_config = new_llm_config.get("multimodal_service")
+
+ if new_completion_config is None:
+ raise Exception("completion_service is not found in llm_config")
+ if new_embedding_config is None:
+ raise Exception("embedding_service is not found in llm_config")
+
+ # Update llm_config in-place to preserve references in other modules (ui.py imports this)
+ llm_config.clear()
+ llm_config.update(new_llm_config)
+
+ # Update service configs in-place to preserve references
+ completion_config.clear()
+ completion_config.update(new_completion_config)
+
+ embedding_config.clear()
+ embedding_config.update(new_embedding_config)
+
+ # multimodal_config can be reassigned (not imported elsewhere)
+ multimodal_config = new_multimodal_config
+
+ # Re-initialize embedding service
+ if embedding_config["embedding_model_service"].lower() == "openai":
+ embedding_service = OpenAI_Embedding(embedding_config)
+ elif embedding_config["embedding_model_service"].lower() == "azure":
+ embedding_service = AzureOpenAI_Ada002(embedding_config)
+ elif embedding_config["embedding_model_service"].lower() == "vertexai":
+ embedding_service = VertexAI_PaLM_Embedding(embedding_config)
+ elif embedding_config["embedding_model_service"].lower() == "genai":
+ embedding_service = GenAI_Embedding(embedding_config)
+ elif embedding_config["embedding_model_service"].lower() == "bedrock":
+ embedding_service = AWS_Bedrock_Embedding(embedding_config)
+ elif embedding_config["embedding_model_service"].lower() == "ollama":
+ embedding_service = Ollama_Embedding(embedding_config)
+ else:
+ raise Exception("Embedding service not implemented")
+
+ return {
+ "status": "success",
+ "message": "LLM configuration reloaded successfully"
+ }
+
+ except Exception as e:
+ return {
+ "status": "error",
+ "message": f"Failed to reload LLM config: {str(e)}"
+ }
+
+
+def reload_db_config(new_db_config: dict = None):
+ """
+ Reload DB configuration from server_config.json and update in-memory config.
+
+ Args:
+ new_db_config: If provided, saves this config to file first.
+ If None, just reloads from existing file.
+
+ Returns:
+ dict: Status of reload operation
+ """
+ global db_config
+
+ try:
+ if new_db_config is not None:
+ with open(SERVER_CONFIG, "r") as f:
+ server_config = json.load(f)
+
+ server_config["db_config"] = new_db_config
+
+ temp_file = f"{SERVER_CONFIG}.tmp"
+ with open(temp_file, "w") as f:
+ json.dump(server_config, f, indent=2)
+ os.replace(temp_file, SERVER_CONFIG)
+
+ with open(SERVER_CONFIG, "r") as f:
+ server_config = json.load(f)
+
+ new_db_config = server_config.get("db_config")
+ if new_db_config is None:
+ raise Exception("db_config is not found in SERVER_CONFIG")
+
+ db_config.clear()
+ db_config.update(new_db_config)
+
+ return {
+ "status": "success",
+ "message": "DB configuration reloaded successfully"
+ }
+ except Exception as e:
+ return {
+ "status": "error",
+ "message": f"Failed to reload DB config: {str(e)}"
+ }
+
+
+def reload_graphrag_config():
+ """
+ Reload GraphRAG configuration from server_config.json.
+ Updates the in-memory graphrag_config dict to reflect changes immediately.
+
+ Returns:
+ dict: Status of reload operation
+ """
+ global graphrag_config
+
+ try:
+ # Read from file
+ with open(SERVER_CONFIG, "r") as f:
+ server_config = json.load(f)
+
+ # Validate
+ new_graphrag_config = server_config.get("graphrag_config")
+ if new_graphrag_config is None:
+ new_graphrag_config = {"reuse_embedding": True}
+
+ # Set defaults (same as startup logic)
+ if "chunker" not in new_graphrag_config:
+ new_graphrag_config["chunker"] = "semantic"
+ if "extractor" not in new_graphrag_config:
+ new_graphrag_config["extractor"] = "llm"
+
+ # Update graphrag_config in-place to preserve references in other modules
+ graphrag_config.clear()
+ graphrag_config.update(new_graphrag_config)
+
+ logger.info(f"GraphRAG config reloaded: extractor={graphrag_config.get('extractor')}, chunker={graphrag_config.get('chunker')}, reuse_embedding={graphrag_config.get('reuse_embedding')}")
+
+ return {
+ "status": "success",
+ "message": "GraphRAG configuration reloaded successfully"
+ }
+
+ except Exception as e:
+ return {
+ "status": "error",
+ "message": f"Failed to reload GraphRAG config: {str(e)}"
+ }
\ No newline at end of file
diff --git a/common/embeddings/embedding_services.py b/common/embeddings/embedding_services.py
index 1597cd2..6f170d0 100644
--- a/common/embeddings/embedding_services.py
+++ b/common/embeddings/embedding_services.py
@@ -184,9 +184,9 @@ class VertexAI_PaLM_Embedding(EmbeddingModel):
def __init__(self, config):
super().__init__(config, model_name=config.get("model_name", "VertexAI PaLM"))
- from langchain.embeddings import VertexAIEmbeddings
+ from langchain_google_vertexai import VertexAIEmbeddings
- self.embeddings = VertexAIEmbeddings(model_name=self.model_name)
+ self.embeddings = VertexAIEmbeddings(model=self.model_name)
class GenAI_Embedding(EmbeddingModel):
@@ -243,3 +243,4 @@ def __init__(self, config):
model=self.model_name,
base_url=base_url
)
+
diff --git a/common/llm_services/google_vertexai_service.py b/common/llm_services/google_vertexai_service.py
index 22679f5..2fec35b 100644
--- a/common/llm_services/google_vertexai_service.py
+++ b/common/llm_services/google_vertexai_service.py
@@ -9,11 +9,11 @@
class GoogleVertexAI(LLM_Model):
def __init__(self, config):
super().__init__(config)
- from langchain_community.llms import VertexAI
+ from langchain_google_vertexai import VertexAI
model_name = config["llm_model"]
self.llm = VertexAI(
- model_name=model_name, max_output_tokens=1000, **config["model_kwargs"]
+ model=model_name, max_output_tokens=1000, **config["model_kwargs"]
)
self.prompt_path = config["prompt_path"]
@@ -38,3 +38,4 @@ def entity_relationship_extraction_prompt(self):
@property
def model(self):
return self.llm
+
diff --git a/common/requirements.txt b/common/requirements.txt
index d5a2d5b..12c9fcf 100644
--- a/common/requirements.txt
+++ b/common/requirements.txt
@@ -80,6 +80,7 @@ kiwisolver==1.4.8
langchain>=0.3.26
langchain-core>=0.3.26
langchain_google_genai==2.1.8
+langchain-google-vertexai==2.1.2
langchain-community==0.3.26
langchain-experimental==0.3.5rc1
langchain-groq==0.3.4
diff --git a/docs/tutorials/configs/nginx.conf b/docs/tutorials/configs/nginx.conf
index dc09929..975d8a0 100644
--- a/docs/tutorials/configs/nginx.conf
+++ b/docs/tutorials/configs/nginx.conf
@@ -14,6 +14,16 @@ server {
proxy_pass http://graphrag-ui:3000/;
}
+ location /setup {
+ rewrite ^/setup$ / break;
+ proxy_pass http://graphrag-ui:3000;
+ }
+
+ location /setup/ {
+ rewrite ^/setup/.*$ / break;
+ proxy_pass http://graphrag-ui:3000;
+ }
+
location /chat-dialog {
proxy_pass http://graphrag-ui:3000/;
diff --git a/ecc/app/common b/ecc/app/common
deleted file mode 120000
index dc879ab..0000000
--- a/ecc/app/common
+++ /dev/null
@@ -1 +0,0 @@
-../../common
\ No newline at end of file
diff --git a/ecc/app/configs b/ecc/app/configs
deleted file mode 120000
index 5992d10..0000000
--- a/ecc/app/configs
+++ /dev/null
@@ -1 +0,0 @@
-../../configs
\ No newline at end of file
diff --git a/ecc/app/ecc_util.py b/ecc/app/ecc_util.py
index 75a3f87..aa6c5ed 100644
--- a/ecc/app/ecc_util.py
+++ b/ecc/app/ecc_util.py
@@ -1,5 +1,5 @@
from common.chunkers import character_chunker, regex_chunker, semantic_chunker, markdown_chunker, recursive_chunker, html_chunker, single_chunker
-from common.config import graphrag_config, embedding_service, llm_config
+from common.config import graphrag_config, embedding_service, llm_config, get_completion_config
from common.llm_services import (
AWS_SageMaker_Endpoint,
AWSBedrock,
@@ -55,24 +55,27 @@ def get_chunker(chunker_type: str = ""):
return chunker
-def get_llm_service():
- if llm_config["completion_service"]["llm_service"].lower() == "openai":
- llm_provider = OpenAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "azure":
- llm_provider = AzureOpenAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "sagemaker":
- llm_provider = AWS_SageMaker_Endpoint(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "vertexai":
- llm_provider = GoogleVertexAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "genai":
- llm_provider = GoogleGenAI(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "bedrock":
- llm_provider = AWSBedrock(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "groq":
- llm_provider = Groq(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "ollama":
- llm_provider = Ollama(llm_config["completion_service"])
- elif llm_config["completion_service"]["llm_service"].lower() == "huggingface":
- llm_provider = HuggingFaceEndpoint(llm_config["completion_service"])
+def get_llm_service(graphname=None):
+ config = get_completion_config(graphname)
+ if config["llm_service"].lower() == "openai":
+ llm_provider = OpenAI(config)
+ elif config["llm_service"].lower() == "azure":
+ llm_provider = AzureOpenAI(config)
+ elif config["llm_service"].lower() == "sagemaker":
+ llm_provider = AWS_SageMaker_Endpoint(config)
+ elif config["llm_service"].lower() == "vertexai":
+ llm_provider = GoogleVertexAI(config)
+ elif config["llm_service"].lower() == "genai":
+ llm_provider = GoogleGenAI(config)
+ elif config["llm_service"].lower() == "bedrock":
+ llm_provider = AWSBedrock(config)
+ elif config["llm_service"].lower() == "groq":
+ llm_provider = Groq(config)
+ elif config["llm_service"].lower() == "ollama":
+ llm_provider = Ollama(config)
+ elif config["llm_service"].lower() == "huggingface":
+ llm_provider = HuggingFaceEndpoint(config)
+ else:
+ raise Exception("LLM Completion Service Not Supported")
return llm_provider
diff --git a/ecc/app/graphrag/community_summarizer.py b/ecc/app/graphrag/community_summarizer.py
index 0bab35b..803ad17 100644
--- a/ecc/app/graphrag/community_summarizer.py
+++ b/ecc/app/graphrag/community_summarizer.py
@@ -12,26 +12,45 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
import re
+import logging
from langchain_core.prompts import PromptTemplate
from common.llm_services import LLM_Model
from common.py_schemas import CommunitySummary
+from common.config import completion_config
+
+logger = logging.getLogger(__name__)
+
+
+# Load prompt from file
+def load_community_prompt():
+ prompt_path = completion_config.get("prompt_path", "./common/prompts/openai_gpt4/")
+ if prompt_path.startswith("./"):
+ prompt_path = prompt_path[2:]
+ prompt_path = prompt_path.rstrip("/")
+
+ prompt_file = os.path.join(prompt_path, "community_summarization.txt")
+ if not os.path.exists(prompt_file):
+ error_msg = f"Community summarization prompt file not found: {prompt_file}. Please ensure the file exists in the configured prompt path."
+ logger.error(error_msg)
+ raise FileNotFoundError(error_msg)
+
+ try:
+ with open(prompt_file, "r", encoding="utf-8") as f:
+ content = f.read()
+ logger.info(f"Successfully loaded community summarization prompt from: {prompt_file}")
+ return content
+ except Exception as e:
+ error_msg = f"Failed to read community summarization prompt from {prompt_file}: {str(e)}"
+ logger.error(error_msg)
+ raise Exception(error_msg)
+
# src: https://github.com/microsoft/graphrag/blob/main/graphrag/index/graph/extractors/summarize/prompts.py
-SUMMARIZE_PROMPT = PromptTemplate.from_template("""
-You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
-Given one or two entities, and a list of descriptions, all related to the same entity or group of entities.
-Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
-If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary, but do not add any information that is not in the description.
-Make sure it is written in third person, and include the entity names so we the have full context.
-
-#######
--Data-
-Commuinty Title: {entity_name}
-Description List: {description_list}
-""")
+SUMMARIZE_PROMPT = PromptTemplate.from_template(load_community_prompt())
id_pat = re.compile(r"[_\d]*")
@@ -58,4 +77,4 @@ async def summarize(self, name: str, text: list[str]) -> CommunitySummary:
)
except Exception as e:
return {"error": True, "summary": "", "message": str(e)}
- return {"error": False, "summary": summary.summary}
+ return {"error": False, "summary": summary.summary}
\ No newline at end of file
diff --git a/ecc/app/graphrag/workers.py b/ecc/app/graphrag/workers.py
index 78f38be..4f8543a 100644
--- a/ecc/app/graphrag/workers.py
+++ b/ecc/app/graphrag/workers.py
@@ -440,7 +440,7 @@ async def process_community(
if len(children) == 1:
summary = children[0]
else:
- llm = ecc_util.get_llm_service()
+ llm = ecc_util.get_llm_service(conn.graphname)
summarizer = community_summarizer.CommunitySummarizer(llm)
summary = await summarizer.summarize(comm_id, children)
if summary["error"]:
diff --git a/ecc/app/main.py b/ecc/app/main.py
index 5468391..b74989a 100644
--- a/ecc/app/main.py
+++ b/ecc/app/main.py
@@ -36,6 +36,7 @@
embedding_service,
get_llm_service,
llm_config,
+ reload_db_config,
)
from common.db.connections import elevate_db_connection_to_token, get_db_connection_id_token
from common.embeddings.base_embedding_store import EmbeddingStore
@@ -213,6 +214,41 @@ async def run_with_tracking(task_key: str, run_func, graphname: str, conn):
try:
running_tasks[task_key] = {"status": "running", "started_at": time.time()}
LogWriter.info(f"Starting ECC task: {task_key}")
+
+ # Reload config at the start of each job to ensure latest settings are used
+ LogWriter.info("📥 Reloading configuration for new job...")
+ from common.config import reload_llm_config, reload_graphrag_config, reload_db_config
+
+ llm_result = reload_llm_config()
+ if llm_result["status"] == "success":
+ LogWriter.info(f"✅ LLM config reloaded: {llm_result['message']}")
+ completion_service = llm_config.get("completion_service", {})
+ ecc_model = completion_service.get("llm_model", "unknown")
+ ecc_provider = completion_service.get("llm_service", "unknown")
+ LogWriter.info(
+ f"[ECC] Using completion model={ecc_model} (provider={ecc_provider})"
+ )
+ else:
+ LogWriter.warning(f"⚠️ LLM config reload had issues: {llm_result['message']}")
+
+ db_result = reload_db_config()
+ if db_result["status"] == "success":
+ LogWriter.info(
+ f"✅ DB config reloaded: {db_result['message']} "
+ f"(host={db_config.get('hostname')}, "
+ f"restppPort={db_config.get('restppPort')}, "
+ f"gsPort={db_config.get('gsPort')})"
+ )
+ else:
+ LogWriter.warning(f"⚠️ DB config reload had issues: {db_result['message']}")
+
+ graphrag_result = reload_graphrag_config()
+ if graphrag_result["status"] == "success":
+ LogWriter.info(f"✅ GraphRAG config reloaded: {graphrag_result['message']}")
+ else:
+ LogWriter.warning(f"⚠️ GraphRAG config reload had issues: {graphrag_result['message']}")
+
+ # Now run the actual job with fresh config
await run_func(graphname, conn)
running_tasks[task_key] = {"status": "completed", "completed_at": time.time()}
LogWriter.info(f"Completed ECC task: {task_key}")
@@ -242,6 +278,17 @@ def consistency_update(
response: Response,
credentials = Depends(auth_credentials),
):
+ db_result = reload_db_config()
+ if db_result["status"] == "success":
+ LogWriter.info(
+ f"✅ DB config reloaded: {db_result['message']} "
+ f"(host={db_config.get('hostname')}, "
+ f"restppPort={db_config.get('restppPort')}, "
+ f"gsPort={db_config.get('gsPort')})"
+ )
+ else:
+ LogWriter.warning(f"⚠️ DB config reload had issues: {db_result['message']}")
+
if isinstance(credentials, HTTPBasicCredentials):
conn = elevate_db_connection_to_token(
db_config.get("hostname"),
diff --git a/graphrag-ui/src/components/Bot.tsx b/graphrag-ui/src/components/Bot.tsx
index 1f4e4e6..266fe2e 100644
--- a/graphrag-ui/src/components/Bot.tsx
+++ b/graphrag-ui/src/components/Bot.tsx
@@ -85,6 +85,7 @@ const Bot = ({ layout, getConversationId }: { layout?: string | undefined, getCo
const handleSelect = (value) => {
setSelectedGraph(value);
localStorage.setItem("selectedGraph", value);
+ window.dispatchEvent(new Event("graphrag:selectedGraph"));
navigate("/chat");
//window.location.reload();
};
diff --git a/graphrag-ui/src/components/ModeToggle.tsx b/graphrag-ui/src/components/ModeToggle.tsx
index a8109bc..8049162 100644
--- a/graphrag-ui/src/components/ModeToggle.tsx
+++ b/graphrag-ui/src/components/ModeToggle.tsx
@@ -1,4 +1,5 @@
import { Moon, Sun, LogOut, Settings } from "lucide-react";
+import React from "react";
import { useLocation, useNavigate } from "react-router-dom";
import { Button } from "@/components/ui/button";
@@ -17,6 +18,67 @@ export function ModeToggle() {
const location = useLocation();
const isLoginRoute = location.pathname === "/";
const [confirm, confirmDialog] = useConfirm();
+  const [userRoles, setUserRoles] = React.useState<string[]>([]);
+  const [graphRoles, setGraphRoles] = React.useState<Record<string, string[]>>({});
+ const [rolesLoaded, setRolesLoaded] = React.useState(false);
+ const [selectedGraph, setSelectedGraph] = React.useState(
+ localStorage.getItem("selectedGraph") || ""
+ );
+ const isGraphAdmin = (graphRoles[selectedGraph] || []).includes("admin");
+ const canAccessSetup =
+ userRoles.includes("superuser") ||
+ userRoles.includes("globaldesigner") ||
+ isGraphAdmin;
+
+ React.useEffect(() => {
+ const loadRoles = async () => {
+ try {
+ const creds = localStorage.getItem("creds");
+ if (!creds) {
+ setUserRoles([]);
+ setRolesLoaded(true);
+ return;
+ }
+ const response = await fetch("/ui/roles", {
+ headers: { Authorization: `Basic ${creds}` },
+ });
+ if (!response.ok) {
+ setUserRoles([]);
+ setRolesLoaded(true);
+ return;
+ }
+ const data = await response.json();
+ const roles = Array.isArray(data.roles) ? data.roles : [];
+ setUserRoles(roles.map((role: string) => role.toLowerCase()));
+ setGraphRoles(
+ data.graph_roles && typeof data.graph_roles === "object"
+ ? Object.fromEntries(
+ Object.entries(data.graph_roles).map(([graph, roles]) => [
+ graph,
+ Array.isArray(roles)
+ ? roles.map((role: string) => role.toLowerCase())
+ : [],
+ ])
+ )
+ : {}
+ );
+ setSelectedGraph(localStorage.getItem("selectedGraph") || "");
+ } finally {
+ setRolesLoaded(true);
+ }
+ };
+ loadRoles();
+ }, [location.pathname]);
+
+ React.useEffect(() => {
+ const handleGraphChange = () => {
+ setSelectedGraph(localStorage.getItem("selectedGraph") || "");
+ };
+ window.addEventListener("graphrag:selectedGraph", handleGraphChange);
+ return () => {
+ window.removeEventListener("graphrag:selectedGraph", handleGraphChange);
+ };
+ }, []);
const handleLogout = async () => {
// Show confirmation dialog
@@ -46,7 +108,7 @@ export function ModeToggle() {
return (
- {!isLoginRoute && (
+ {!isLoginRoute && rolesLoaded && canAccessSetup && (
@@ -1088,7 +1261,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={azureAccountKey}
onChange={(e) => setAzureAccountKey(e.target.value)}
placeholder="Enter account key"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
/>
@@ -1100,7 +1273,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={azureContainer}
onChange={(e) => setAzureContainer(e.target.value)}
placeholder="my-container"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
/>
@@ -1112,7 +1285,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={cloudPrefix}
onChange={(e) => setCloudPrefix(e.target.value)}
placeholder="folder/subfolder/"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
/>
>
@@ -1183,7 +1356,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
{downloadedFiles.map((file, index) => (
{file.name}
@@ -1212,8 +1385,8 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
Process downloaded files and add them to the knowledge graph
handleIngestDocuments("downloaded")}
- disabled={isIngesting}
+ onClick={() => handleRunIngest("downloaded")}
+ disabled={isIngesting || isProcessingFiles}
className="gradient text-white w-full"
>
{isIngesting ? (
@@ -1221,6 +1394,11 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
Ingesting...
>
+ ) : isProcessingFiles ? (
+ <>
+
+ Processing files...
+ >
) : (
<>
@@ -1228,19 +1406,6 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
>
)}
- {ingestMessage && (
-
- {ingestMessage}
-
- )}
)}
@@ -1264,7 +1429,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={awsAccessKey}
onChange={(e) => setAwsAccessKey(e.target.value)}
placeholder="Enter AWS access key"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
disabled={isIngesting}
/>
@@ -1278,7 +1443,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={awsSecretKey}
onChange={(e) => setAwsSecretKey(e.target.value)}
placeholder="Enter AWS secret key"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
disabled={isIngesting}
/>
@@ -1304,7 +1469,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={inputBucket}
onChange={(e) => setInputBucket(e.target.value)}
placeholder="Enter input bucket name"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
disabled={isIngesting || skipBDAProcessing}
/>
@@ -1318,7 +1483,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={outputBucket}
onChange={(e) => setOutputBucket(e.target.value)}
placeholder="Enter output bucket name"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
disabled={isIngesting}
/>
@@ -1332,7 +1497,7 @@ const IngestGraph: React.FC = ({ isModal = false }) => {
value={regionName}
onChange={(e) => setRegionName(e.target.value)}
placeholder="e.g., us-east-1"
- className="dark:border-[#3D3D3D] dark:bg-background"
+ className="dark:border-[#3D3D3D] dark:bg-shadeA"
disabled={isIngesting}
/>
diff --git a/graphrag-ui/src/pages/setup/KGAdmin.tsx b/graphrag-ui/src/pages/setup/KGAdmin.tsx
index 272553f..cea8389 100644
--- a/graphrag-ui/src/pages/setup/KGAdmin.tsx
+++ b/graphrag-ui/src/pages/setup/KGAdmin.tsx
@@ -1,4 +1,4 @@
-import React, { useState, useEffect } from "react";
+import React, { useState, useEffect, useRef } from "react";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { Database, Loader2, RefreshCw, Upload } from "lucide-react";
@@ -6,6 +6,7 @@ import {
Dialog,
DialogContent,
DialogDescription,
+ DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
@@ -21,7 +22,7 @@ import { useNavigate } from "react-router-dom";
import IngestGraph from "./IngestGraph";
const KGAdmin = () => {
- const [confirm, confirmDialog] = useConfirm();
+ const [confirm, confirmDialog, isConfirmDialogOpen] = useConfirm();
const navigate = useNavigate();
const [availableGraphs, setAvailableGraphs] = useState([]);
@@ -32,7 +33,7 @@ const KGAdmin = () => {
// Reset states when dialogs close
const handleInitializeDialogChange = (open: boolean) => {
- if (!open && isInitializing) {
+ if (!open && isConfirmDialogOpen) {
return;
}
setInitializeDialogOpen(open);
@@ -44,6 +45,9 @@ const KGAdmin = () => {
};
const handleRefreshDialogChange = (open: boolean) => {
+ if (!open && isConfirmDialogOpen) {
+ return;
+ }
setRefreshDialogOpen(open);
if (!open) {
setRefreshMessage("");
@@ -61,6 +65,7 @@ const KGAdmin = () => {
const [isRefreshing, setIsRefreshing] = useState(false);
const [refreshMessage, setRefreshMessage] = useState("");
const [isRebuildRunning, setIsRebuildRunning] = useState(false);
+ const isRebuildRunningRef = useRef(false);
const [isCheckingStatus, setIsCheckingStatus] = useState(false);
// Load available graphs
@@ -150,20 +155,23 @@ const KGAdmin = () => {
}
setStatusMessage(
- `✅ Graph "${graphName}" created and initialized successfully!`
+ `✅ Graph "${graphName}" created and initialized successfully! You can now close this dialog.`
);
setStatusType("success");
- const store = JSON.parse(localStorage.getItem("site") || "{}");
- if (!store.graphs) {
- store.graphs = [];
- }
- if (!store.graphs.includes(graphName)) {
- store.graphs.push(graphName);
- localStorage.setItem("site", JSON.stringify(store));
- setAvailableGraphs([...store.graphs]);
- }
+ const newGraph = graphName;
+ setAvailableGraphs(prev => {
+ if (!prev.includes(newGraph)) {
+ const updated = [...prev, newGraph];
+ const store = JSON.parse(localStorage.getItem("site") || "{}");
+ store.graphs = updated;
+ localStorage.setItem("site", JSON.stringify(store));
+ return updated;
+ }
+ return prev;
+ });
+ setRefreshGraphName(graphName);
setGraphName("");
} catch (error: any) {
console.error("Error creating graph:", error);
@@ -195,10 +203,11 @@ const KGAdmin = () => {
if (statusResponse.ok) {
const statusData = await statusResponse.json();
- const wasRunning = isRebuildRunning;
+ const wasRunning = isRebuildRunningRef.current;
const isCurrentlyRunning = statusData.is_running || false;
setIsRebuildRunning(isCurrentlyRunning);
+ isRebuildRunningRef.current = isCurrentlyRunning;
if (isCurrentlyRunning) {
const startTime = statusData.started_at
@@ -207,26 +216,25 @@ const KGAdmin = () => {
setRefreshMessage(
`⚠️ A rebuild is already in progress for "${graphName}" (started at ${startTime}). Please wait for it to complete.`
);
+ } else if (wasRunning && statusData.status === "completed") {
+ setRefreshMessage(`✅ Rebuild completed successfully for "${graphName}".`);
+ } else if (statusData.status === "failed") {
+ setRefreshMessage(`❌ Previous rebuild failed: ${statusData.error || "Unknown error"}`);
+ } else if (statusData.status === "error") {
+ setRefreshMessage(`❌ Failed to check rebuild status: ${statusData.error || "Unknown error"}`);
+ } else if (statusData.status === "unknown") {
+ setRefreshMessage(`⚠️ ECC service returned unknown status. It may be unavailable.`);
} else {
- if (wasRunning && statusData.status === "completed") {
- setRefreshMessage(
- `✅ Rebuild completed successfully for "${graphName}".`
- );
- } else if (statusData.status === "failed") {
- setRefreshMessage(
- `❌ Previous rebuild failed: ${statusData.error || "Unknown error"}`
- );
- } else {
- if (!showLoadingMessage) {
- setRefreshMessage("");
- }
- }
+ setRefreshMessage("");
}
+ } else {
+ setRefreshMessage(`❌ Failed to check rebuild status (HTTP ${statusResponse.status}).`);
}
} catch (error: any) {
console.error("Error checking rebuild status:", error);
- setIsRebuildRunning(false);
- setRefreshMessage("");
+ if (showLoadingMessage) {
+ setRefreshMessage(`❌ Unable to reach ECC service: ${error.message || "Connection failed"}`);
+ }
} finally {
setIsCheckingStatus(false);
}
@@ -246,20 +254,41 @@ const KGAdmin = () => {
return;
}
+ setIsRefreshing(true);
+
const shouldRefresh = await confirm(
`Are you sure you want to refresh the knowledge graph "${refreshGraphName}"? This will rebuild the graph content.`
);
if (!shouldRefresh) {
setRefreshMessage("Operation cancelled by user.");
+ setIsRefreshing(false);
return;
}
- setIsRefreshing(true);
- setRefreshMessage("Submitting rebuild request...");
+ setRefreshMessage("Verifying rebuild status...");
try {
const creds = localStorage.getItem("creds");
+ // Final status check to prevent race conditions
+ const statusCheckResponse = await fetch(`/ui/${refreshGraphName}/rebuild_status`, {
+ method: "GET",
+ headers: { Authorization: `Basic ${creds}` },
+ });
+
+ if (statusCheckResponse.ok) {
+ const statusData = await statusCheckResponse.json();
+ if (statusData.is_running) {
+ setRefreshMessage(`⚠️ A rebuild is already in progress for "${refreshGraphName}". Please wait for it to complete.`);
+ setIsRebuildRunning(true);
+ isRebuildRunningRef.current = true;
+ setIsRefreshing(false);
+ return;
+ }
+ }
+
+ setRefreshMessage("Submitting rebuild request...");
+
const response = await fetch(`/ui/${refreshGraphName}/rebuild_graph`, {
method: "POST",
headers: { Authorization: `Basic ${creds}` },
@@ -267,6 +296,11 @@ const KGAdmin = () => {
if (!response.ok) {
const errorData = await response.json();
+ if (response.status === 409) {
+ setRefreshMessage(`⚠️ ${errorData.detail || errorData.message}`);
+ setIsRefreshing(false);
+ return;
+ }
throw new Error(
errorData.detail || `Failed to refresh graph: ${response.statusText}`
);
@@ -279,6 +313,7 @@ const KGAdmin = () => {
`✅ Refresh submitted successfully! The knowledge graph "${refreshGraphName}" is being rebuilt.`
);
setIsRebuildRunning(true);
+ isRebuildRunningRef.current = true;
} catch (error: any) {
console.error("Error refreshing graph:", error);
setRefreshMessage(`❌ Error: ${error.message}`);
@@ -390,30 +425,27 @@ const KGAdmin = () => {
{/* Initialize Dialog */}