forked from yusufkaraaslan/Skill_Seekers
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_install_multiplatform.py
More file actions
138 lines (106 loc) · 5.26 KB
/
test_install_multiplatform.py
File metadata and controls
138 lines (106 loc) · 5.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
#!/usr/bin/env python3
"""
Tests for multi-platform install workflow
"""
import unittest
from pathlib import Path
from unittest.mock import MagicMock, patch
class TestInstallCLI(unittest.TestCase):
    """Test install_skill CLI with multi-platform support.

    The parser built here mirrors the one defined in install_skill.py;
    these tests pin down the --target flag's accepted values and default.
    """

    @staticmethod
    def _make_parser():
        """Build an argument parser matching install_skill.py's CLI surface.

        Returns:
            argparse.ArgumentParser with a required --config option and a
            --target option restricted to the supported platforms.
        """
        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument("--config", required=True)
        parser.add_argument(
            "--target", choices=["claude", "gemini", "openai", "markdown"], default="claude"
        )
        return parser

    def test_cli_accepts_target_flag(self):
        """Test that CLI accepts --target flag"""
        parser = self._make_parser()

        # Every supported platform must be accepted and echoed back verbatim.
        for platform in ["claude", "gemini", "openai", "markdown"]:
            args = parser.parse_args(["--config", "test", "--target", platform])
            self.assertEqual(args.target, platform)

        # Omitting --target must fall back to the "claude" default.
        args = parser.parse_args(["--config", "test"])
        self.assertEqual(args.target, "claude")

    def test_cli_rejects_invalid_target(self):
        """Test that CLI rejects invalid --target values"""
        parser = self._make_parser()

        # argparse exits the process (raises SystemExit) for a value
        # outside the declared `choices` list.
        with self.assertRaises(SystemExit):
            parser.parse_args(["--config", "test", "--target", "invalid"])
class TestInstallToolMultiPlatform(unittest.IsolatedAsyncioTestCase):
    """Test install_skill_tool with multi-platform support"""

    async def test_install_tool_accepts_target_parameter(self):
        """Test that install_skill_tool accepts target parameter"""
        from skill_seekers.mcp.tools.packaging_tools import install_skill_tool

        # Just test dry_run mode which doesn't need mocking all internal tools
        # Test with each platform
        for platform_id in ("claude", "gemini", "openai"):
            # Use dry_run=True which skips actual execution
            # It will still show us the platform is being recognized
            with (
                patch("builtins.open", create=True) as mock_open,
                patch("json.load") as mock_json_load,
            ):
                # Mock config file reading: open() yields a fake context
                # manager and json.load() returns a minimal config dict.
                fake_file = MagicMock()
                fake_file.__enter__ = lambda s: s
                fake_file.__exit__ = MagicMock()
                mock_open.return_value = fake_file
                mock_json_load.return_value = {"name": "test-skill"}

                outcome = await install_skill_tool(
                    {"config_path": "configs/test.json", "target": platform_id, "dry_run": True}
                )

                # Verify result mentions the correct platform
                text = outcome[0].text
                self.assertIsInstance(text, str)
                self.assertIn("WORKFLOW COMPLETE", text)

    async def test_install_tool_uses_correct_adaptor(self):
        """Test that install_skill_tool uses the correct adaptor for each platform"""
        from skill_seekers.cli.adaptors import get_adaptor

        # Each platform id must resolve to an adaptor reporting that same id.
        for platform_id in ("claude", "gemini", "openai", "markdown"):
            self.assertEqual(get_adaptor(platform_id).PLATFORM, platform_id)

    async def test_install_tool_platform_specific_api_keys(self):
        """Test that install_tool checks for correct API key per platform"""
        from skill_seekers.cli.adaptors import get_adaptor

        # Expected API-key env var per platform, checked in this order.
        expected_env_vars = {
            "claude": "ANTHROPIC_API_KEY",
            "gemini": "GOOGLE_API_KEY",
            "openai": "OPENAI_API_KEY",
        }
        for platform_id, env_var in expected_env_vars.items():
            self.assertEqual(get_adaptor(platform_id).get_env_var_name(), env_var)

        # Markdown doesn't need an API key, but should still have a method
        self.assertIsNotNone(get_adaptor("markdown").get_env_var_name())
class TestInstallWorkflowIntegration(unittest.IsolatedAsyncioTestCase):
    """Integration tests for full install workflow"""

    async def test_dry_run_shows_correct_platform(self):
        """Test dry run shows correct platform in output"""
        from skill_seekers.cli.adaptors import get_adaptor

        # Each target id must map to its human-readable platform name.
        expected_names = (
            ("claude", "Claude AI (Anthropic)"),
            ("gemini", "Google Gemini"),
            ("openai", "OpenAI ChatGPT"),
            ("markdown", "Generic Markdown (Universal)"),
        )
        for target_id, display_name in expected_names:
            self.assertEqual(get_adaptor(target_id).PLATFORM_NAME, display_name)
# Allow running this test module directly: `python test_install_multiplatform.py`.
if __name__ == "__main__":
    unittest.main()