# app.py
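"""Streamlit front-end for exporting and analyzing Discord conversations.

The app drives DiscordChatExporter through Docker (via helpers in
discord_export) and uses Google's Gemini API through the google-genai SDK
for analysis. Launch it with: streamlit run app.py
"""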
import streamlit as st
import os
import json
import time
import subprocess
from datetime import datetime
from dotenv import load_dotenv
from google import genai
# Import functions from existing scripts
from discord_export import (
    check_docker, export_discord_channel, compress_conversation,
    load_last_timestamp, save_last_timestamp, get_most_recent_timestamp
)
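
# Load any keys already saved to .env so os.getenv() sees them on startup;
# load_dotenv() is safe to call even if the file does not exist yet.
load_dotenv()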
# Set page config
st.set_page_config(
    page_title="Discord Chat Analyzer",
    page_icon="💬",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Function to save API keys to .env file
def save_keys_to_env(discord_token, gemini_api_key):
    with open(".env", "w") as f:
        f.write(f"DISCORD_TOKEN={discord_token}\n")
        f.write(f"GEMINI_API_KEY={gemini_api_key}\n")
    load_dotenv(override=True)

# Function to set up the Gemini model
def setup_gemini_model():
    """Configure and return a Gemini chat session."""
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        st.error("Gemini API key not found. Please provide it in the settings.")
        return None
    client = genai.Client(api_key=api_key)
    chat = client.chats.create(model="gemini-2.0-flash")
    return chat

# Sidebar for settings
with st.sidebar:
    st.title("⚙️ Settings")

    # API Keys section
    st.subheader("API Keys")
    discord_token = st.text_input("Discord Token", value=os.getenv("DISCORD_TOKEN", ""), type="password",
                                  help="Your Discord authentication token. Learn how to get it in the Token-and-IDs guide.")
    gemini_api_key = st.text_input("Gemini API Key", value=os.getenv("GEMINI_API_KEY", ""), type="password",
                                   help="Your Google Gemini API key.")
    if st.button("Save API Keys"):
        save_keys_to_env(discord_token, gemini_api_key)
        st.success("API keys saved successfully!")

    # Docker Status
    st.subheader("Docker Status")
    docker_status = check_docker()
    if docker_status:
        st.success("Docker is running")
    else:
        st.error("Docker is not running or not installed")
        st.info("This tool requires Docker to be installed and running. Please check the Docker installation guide.")

# Main content
st.title("Discord Chat Analyzer")
st.markdown("Export and analyze your Discord conversations with AI assistance.")
# Tabs for different functions
tab1, tab2, tab3 = st.tabs(["Export", "View Conversations", "Analyze"])
# Export tab
with tab1:
    st.header("Export Discord Conversations")
    col1, col2 = st.columns(2)

    with col1:
        channel_id = st.text_input("Channel ID", help="The ID of the Discord channel you want to export.")

        # Date range selection
        date_options = st.radio("Export Range", ["Full History", "Date Range", "Incremental (since last export)"])
        if date_options == "Date Range":
            start_date = st.date_input("Start Date")
            end_date = st.date_input("End Date")
        else:
            start_date = None
            end_date = None

    with col2:
        st.subheader("Export Options")
        export_format = st.selectbox("Export Format", ["Json", "HtmlDark", "HtmlLight", "Csv", "PlainText"], index=0)
        download_media = st.checkbox("Download Media (images, avatars, etc.)", value=False)
        include_threads = st.selectbox("Include Threads", ["none", "active", "all"], index=0)

    export_button = st.button("Export Conversation", use_container_width=True, type="primary")

    if export_button:
        if not channel_id:
            st.error("Please provide a Channel ID")
        elif not os.getenv("DISCORD_TOKEN"):
            st.error("Please provide a Discord Token in the settings")
        elif not docker_status:
            st.error("Docker is required but not available")
        else:
            # Set up progress bar
            progress_bar = st.progress(0)
            status_text = st.empty()

            # Create output directory
            output_dir = os.path.join(os.getcwd(), "team_chat")
            os.makedirs(output_dir, exist_ok=True)

            # Determine start date for incremental export
            if date_options == "Incremental (since last export)":
                last_timestamp = load_last_timestamp(channel_id)
                if last_timestamp:
                    start_date_str = last_timestamp
                    status_text.info(f"Performing incremental export from {start_date_str}")
                else:
                    status_text.info("No previous export found. Performing full export.")
                    start_date_str = None
                end_date_str = None
            elif date_options == "Date Range":
                start_date_str = start_date.isoformat()
                end_date_str = end_date.isoformat()
            else:
                start_date_str = None
                end_date_str = None

            progress_bar.progress(25, text="Starting export...")

            # Prepare export command
            docker_cmd = [
                'docker', 'run', '--rm',
                '-v', f"{output_dir}:/out",
                'tyrrrz/discordchatexporter:stable', 'export',
                '-f', export_format,
                '-c', channel_id,
                '-t', os.getenv("DISCORD_TOKEN")
            ]
            # Add date range if specified
            if start_date_str:
                docker_cmd.extend(['--after', start_date_str])
            if date_options == "Date Range" and end_date_str:
                docker_cmd.extend(['--before', end_date_str])
            # Add media download option if selected
            if download_media:
                docker_cmd.append('--media')
            # Add threads option if not none
            if include_threads != "none":
                docker_cmd.extend(['--include-threads', include_threads])

            # Execute export
            try:
                status_text.info("Exporting conversation... (this may take a while for large channels)")
                subprocess.run(docker_cmd, check=True)
                progress_bar.progress(75, text="Processing export...")

                # Wait for the file to be fully written
                time.sleep(2)

                # Find the exported file
                if export_format == "Json":
                    json_files = [f for f in os.listdir(output_dir) if f.endswith('.json') and channel_id in f]
                    if json_files:
                        json_path = os.path.join(output_dir, json_files[0])
                        # Update timestamp for incremental exports
                        try:
                            with open(json_path, "r", encoding="utf-8") as f:
                                conversation = json.load(f)
                            latest_timestamp = get_most_recent_timestamp(conversation)
                            if latest_timestamp:
                                save_last_timestamp(channel_id, latest_timestamp)
                        except Exception as e:
                            status_text.error(f"Error processing JSON: {str(e)}")

                progress_bar.progress(100, text="Export completed!")
                st.success(f"Conversation exported successfully to: {output_dir}")
            except subprocess.CalledProcessError as e:
                progress_bar.empty()
                st.error(f"Export failed: {str(e)}")
            except Exception as e:
                progress_bar.empty()
                st.error(f"An error occurred: {str(e)}")

# View Conversations tab
with tab2:
    st.header("View Exported Conversations")
    output_dir = os.path.join(os.getcwd(), "team_chat")
    os.makedirs(output_dir, exist_ok=True)

    # List all exported files
    exported_files = []
    for f in os.listdir(output_dir):
        file_path = os.path.join(output_dir, f)
        if os.path.isfile(file_path):
            file_size = os.path.getsize(file_path) / (1024 * 1024)  # Size in MB
            modified_time = datetime.fromtimestamp(os.path.getmtime(file_path))
            exported_files.append({
                "name": f,
                "path": file_path,
                "size": f"{file_size:.2f} MB",
                "modified": modified_time.strftime("%Y-%m-%d %H:%M:%S")
            })

    # Display files in a table
    if exported_files:
        st.dataframe(exported_files,
                     column_config={
                         "name": "Filename",
                         "size": "Size",
                         "modified": "Last Modified",
                         "path": st.column_config.Column(
                             "File Path",
                             disabled=True
                         )
                     },
                     use_container_width=True)

        # View file contents
        selected_file = st.selectbox("Select a file to view:",
                                     [f["name"] for f in exported_files],
                                     index=None)
        if selected_file:
            file_path = os.path.join(output_dir, selected_file)
            try:
                if selected_file.endswith('.json'):
                    with open(file_path, "r", encoding="utf-8") as f:
                        data = json.load(f)

                    # Show conversation summary
                    st.subheader("Conversation Summary")

                    # Basic info
                    if "guild" in data:
                        st.info(f"Server: {data.get('guild', {}).get('name', 'Unknown')}")
                    st.info(f"Channel: {data.get('channel', {}).get('name', 'Unknown')}")
                    st.info(f"Message Count: {len(data.get('messages', []))}")

                    # Show messages
                    st.subheader("Messages")
                    with st.expander("View Messages", expanded=False):
                        for msg in data.get("messages", [])[:100]:  # Limit to first 100 messages
                            author = msg.get("author", {}).get("nickname") or msg.get("author", {}).get("name", "Unknown")
                            timestamp = msg.get("timestamp", "")
                            content = msg.get("content", "").strip()
                            if content:
                                st.markdown(f"**{author}** ({timestamp}):")
                                st.markdown(content)
                                st.divider()
                        if len(data.get("messages", [])) > 100:
                            st.info("Only showing the first 100 messages. The full conversation is available for analysis.")
                elif selected_file.endswith('.html'):
                    st.warning("HTML files can't be previewed here. Please open the file in a web browser.")
                elif selected_file.endswith('.txt') or selected_file.endswith('.csv'):
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()
                    st.text(content[:10000] + ("..." if len(content) > 10000 else ""))
                else:
                    st.warning(f"Preview not available for this file type: {selected_file}")
            except Exception as e:
                st.error(f"Error reading file: {str(e)}")
    else:
        st.info("No exported files found. Use the Export tab to export conversations first.")

# Analyze tab
with tab3:
    st.header("AI Analysis")
    output_dir = os.path.join(os.getcwd(), "team_chat")
    json_files = [f for f in os.listdir(output_dir) if f.endswith('.json')]

    if not json_files:
        st.info("No conversation files found. Please export a conversation first.")
    else:
        # Select file to analyze
        selected_file = st.selectbox("Select conversation to analyze:",
                                     json_files,
                                     index=None)
        if selected_file:
            if not os.getenv("GEMINI_API_KEY"):
                st.error("Please add your Gemini API key in Settings to use AI analysis.")
            else:
                try:
                    json_path = os.path.join(output_dir, selected_file)
                    with open(json_path, "r", encoding="utf-8") as f:
                        conversation = json.load(f)

                    # Compress conversation for analysis
                    st.info("Preparing conversation for analysis...")
                    summary = compress_conversation(conversation)

                    # Initialize the Gemini model for chat
                    chat_session = setup_gemini_model()
                    if chat_session:
                        # Send initial context message, limited to 50K chars to avoid token limits
                        with st.spinner("Initializing AI analysis..."):
                            context_prompt = f"""Here's a Discord conversation summary to analyze:
{summary[:50000]}
Please keep your responses focused on the content of this conversation."""
                            response = chat_session.send_message(context_prompt)
                        st.success("AI analysis ready! Ask questions about the conversation.")

                        # Create chat interface
                        query = st.text_input("Ask a question about this conversation:")
                        if query:
                            with st.spinner("Analyzing..."):
                                try:
                                    response = chat_session.send_message(query)
                                    st.markdown(response.text)
                                except Exception as e:
                                    st.error(f"Error getting AI response: {str(e)}")

                        # Suggested questions
                        st.markdown("### Suggested questions:")
                        suggested_questions = [
                            "What are the main topics discussed in this conversation?",
                            "Summarize the key points from this conversation.",
                            "Who are the most active participants?",
                            "Are there any decisions or action items in this conversation?",
                            "What's the overall sentiment of this conversation?"
                        ]
                        for q in suggested_questions:
                            if st.button(q):
                                with st.spinner("Analyzing..."):
                                    try:
                                        response = chat_session.send_message(q)
                                        st.markdown(response.text)
                                    except Exception as e:
                                        st.error(f"Error getting AI response: {str(e)}")
                except Exception as e:
                    st.error(f"Error processing conversation: {str(e)}")

# Footer
st.divider()
st.markdown("""
### Help & Resources
- This tool uses [DiscordChatExporter](https://github.com/Tyrrrz/DiscordChatExporter) with Docker to export Discord conversations.
- To get your Discord Token and Channel IDs, check the [Token-and-IDs guide](https://github.com/Tyrrrz/DiscordChatExporter/blob/master/.docs/Token-and-IDs.md).
- Analysis is powered by Google's Gemini AI. You'll need a Gemini API key from [Google AI Studio](https://makersuite.google.com/).
- Docker is required for this application to function properly.
""")