-
-
Notifications
You must be signed in to change notification settings - Fork 223
Expand file tree
/
Copy pathgen_schema_reference.py
More file actions
342 lines (300 loc) · 12.1 KB
/
gen_schema_reference.py
File metadata and controls
342 lines (300 loc) · 12.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
"""
Generates schema reference for dstack models.
"""
import importlib
import inspect
import logging
import re
from enum import Enum
from fnmatch import fnmatch
import mkdocs_gen_files
import yaml
from mkdocs.structure.files import File
from pydantic.main import BaseModel
from typing_extensions import Annotated, Any, Dict, Literal, Type, Union, get_args, get_origin
from dstack._internal.core.models.resources import Range
# Only markdown pages under docs/reference/ are scanned for #SCHEMA# placeholders.
FILE_PATTERN = "docs/reference/**.md"
# Namespaced under "mkdocs.plugins" so messages show up in mkdocs' own log output.
logger = logging.getLogger("mkdocs.plugins.dstack.schema")
logger.info("Generating schema reference...")
def _is_linkable_type(annotation: Any) -> bool:
    """Return True when the annotation refers to (or wraps) a BaseModel subclass.

    Range is excluded because it renders as a primitive type, not a linked
    sub-section. Annotated, Union/Optional, and list wrappers are unwrapped
    recursively.
    """
    # Plain class: linkable iff it is a pydantic model other than Range.
    if inspect.isclass(annotation):
        if not issubclass(annotation, BaseModel):
            return False
        return not issubclass(annotation, Range)
    wrapper = get_origin(annotation)
    if wrapper is Annotated:
        inner, *_ = get_args(annotation)
        return _is_linkable_type(inner)
    if wrapper is Union:
        return any(map(_is_linkable_type, get_args(annotation)))
    if wrapper is list:
        inner_args = get_args(annotation)
        return bool(inner_args) and _is_linkable_type(inner_args[0])
    # Anything else (dict, Literal, bare primitives, ...) is not linkable.
    return False
def _type_sort_key(t: str) -> tuple:
"""Sort key for type parts: primitives first, then literals, then compound types."""
order = {"bool": 0, "int": 1, "float": 2, "str": 3}
if t in order:
return (0, order[t])
if t.startswith('"'):
return (1, t)
if t.startswith("list"):
return (2, t)
if t == "dict":
return (3, "")
if t == "object":
return (4, "")
return (5, t)
def get_friendly_type(annotation: Type) -> str:
    """Get a user-friendly type string for documentation.

    Produces types like: ``int | str``, ``"rps"``, ``list[object]``, ``"spot" | "on-demand" | "auto"``.

    The branch order matters: typing wrappers (Annotated/Union/Literal/list/dict)
    are handled before concrete classes, and within concrete classes the
    special-cased dstack types are checked before the generic primitive MRO checks.
    """
    # Unwrap Annotated
    if get_origin(annotation) is Annotated:
        return get_friendly_type(get_args(annotation)[0])
    # Handle Union (including Optional)
    if get_origin(annotation) is Union:
        # NoneType is dropped: optionality is conveyed by the (Optional)
        # marker in the rendered item, not by the type string.
        args = [a for a in get_args(annotation) if a is not type(None)]
        if not args:
            return ""
        parts: list = []
        for arg in args:
            friendly = get_friendly_type(arg)
            # Split compound types (e.g., "int | str" from Range) to deduplicate,
            # but avoid splitting types that contain brackets (e.g., list[...])
            if "[" not in friendly:
                for part in friendly.split(" | "):
                    if part and part not in parts:
                        parts.append(part)
            else:
                if friendly and friendly not in parts:
                    parts.append(friendly)
        parts.sort(key=_type_sort_key)
        return " | ".join(parts)
    # Handle Literal — show as enum (specific values are in the field description)
    if get_origin(annotation) is Literal:
        return "enum"
    # Handle list
    if get_origin(annotation) is list:
        args = get_args(annotation)
        if args:
            inner = get_friendly_type(args[0])
            return f"list[{inner}]"
        return "list"
    # Handle dict
    if get_origin(annotation) is dict:
        return "dict"
    # Handle concrete classes
    if inspect.isclass(annotation):
        # Enum — list values
        if issubclass(annotation, Enum):
            values = [e.value for e in annotation]
            return " | ".join(f'"{v}"' for v in values)
        # Range — depends on inner type parameter
        if issubclass(annotation, Range):
            min_field = annotation.__fields__.get("min")
            if min_field and inspect.isclass(min_field.type_):
                # Range[Memory] → str, Range[int] → int | str
                if issubclass(min_field.type_, float):
                    return "str"
                return "int | str"
            # NOTE(review): a Range without a classifiable "min" field falls
            # through to the generic branches below.
        # Memory (float subclass that parses "8GB" strings)
        from dstack._internal.core.models.resources import Memory as _Memory
        if issubclass(annotation, _Memory):
            return "str"
        # BaseModel subclass (not Range)
        if issubclass(annotation, BaseModel) and not issubclass(annotation, Range):
            # Root models (with __root__ field) — resolve from the root type
            if "__root__" in annotation.__fields__:
                return get_friendly_type(annotation.__fields__["__root__"].annotation)
            # Models with custom __get_validators__ accept primitive input (int, str)
            # in addition to the full object form (e.g., GPUSpec, CPUSpec, DiskSpec)
            if "__get_validators__" in annotation.__dict__:
                return "int | str | object"
            return "object"
        # ComputeCapability (tuple subclass that parses "7.5" strings)
        if annotation.__name__ == "ComputeCapability":
            return "float | str"
        # Constrained and primitive types — check MRO
        # bool must come before int (bool is a subclass of int)
        if issubclass(annotation, bool):
            return "bool"
        if issubclass(annotation, int):
            # Duration (int subclass that parses "5m" strings)
            if annotation.__name__ == "Duration":
                return "int | str"
            return "int"
        if issubclass(annotation, float):
            return "float"
        if issubclass(annotation, str):
            return "str"
        if issubclass(annotation, (list, tuple)):
            return "list"
        if issubclass(annotation, dict):
            return "dict"
        # Unknown concrete class — fall back to its name.
        return annotation.__name__
    # Not a class and not a recognized typing construct — best-effort repr.
    return str(annotation)
# Maps JSON-schema primitive type names to the friendly type names used in the docs.
# NOTE(review): not referenced anywhere in this module — `_enrich_type_from_schema`
# defines its own `_ENRICHABLE` subset; confirm whether this map is still needed.
_JSON_SCHEMA_TYPE_MAP = {
    "string": "str",
    "integer": "int",
    "number": "float",
    "boolean": "bool",
    "array": "list",
    "object": "object",
}
def _enrich_type_from_schema(friendly_type: str, prop_schema: Dict[str, Any]) -> str:
"""Enrich the friendly type with extra accepted types from the JSON schema.
Models may define ``schema_extra`` that adds ``anyOf`` entries for fields
that accept alternative input types (e.g., duration fields typed as ``int``
but also accepting ``str`` like ``"5m"``).
"""
any_of = prop_schema.get("anyOf")
if not any_of:
return friendly_type
# Only consider string/integer — the most common alternative input types.
# Skip boolean (typically a backward-compat artifact) and object/array.
_ENRICHABLE = {"string": "str", "integer": "int"}
schema_types = set()
for entry in any_of:
mapped = _ENRICHABLE.get(entry.get("type", ""))
if mapped:
schema_types.add(mapped)
# Add any schema types not already present in the friendly type
current_parts = [p.strip() for p in friendly_type.split(" | ")]
new_parts = schema_types - set(current_parts)
if not new_parts:
return friendly_type
all_parts = list(set(current_parts) | new_parts)
# If str is now present, enum is redundant
if "str" in all_parts and "enum" in all_parts:
all_parts.remove("enum")
all_parts.sort(key=_type_sort_key)
return " | ".join(all_parts)
def generate_schema_reference(
    model_path: str,
    *,
    overrides: Dict[str, Dict[str, Any]] = None,
    prefix: str = "",
) -> str:
    """Render a markdown reference section for a pydantic model.

    Args:
        model_path: Dotted import path of the model, e.g. ``dstack.<module>.<Model>``.
        overrides: Optional mapping of field name to per-field overrides
            (e.g. ``required``, ``default``), plus the special keys
            ``show_root_heading``, ``item_id_prefix``, and ``item_id_mapping``.
        prefix: Prepended to every emitted line (preserves the indentation of
            the ``#SCHEMA#`` placeholder being replaced).

    Returns:
        Markdown text: an optional ``### <Model>`` heading followed by one
        ``######`` item per documented field.
    """
    module, model_name = model_path.rsplit(".", maxsplit=1)
    cls = getattr(importlib.import_module(module), model_name)
    rows = []
    # Emit the "### <Model>" heading unless disabled via
    # overrides["show_root_heading"] = False.
    if (
        not overrides
        or "show_root_heading" not in overrides
        or overrides.get("show_root_heading") is True
    ):
        rows.extend(
            [
                prefix + f"### {cls.__name__}",
                "",
            ]
        )
    # Get JSON schema to detect extra accepted types from schema_extra
    try:
        schema_props = cls.schema().get("properties", {})
    except Exception:
        # Some models cannot produce a JSON schema; skip type enrichment then.
        schema_props = {}
    for name, field in cls.__fields__.items():
        default = field.default
        if isinstance(default, Enum):
            # Show the enum's value rather than the Enum repr.
            default = default.value
        friendly_type = get_friendly_type(field.annotation)
        friendly_type = _enrich_type_from_schema(friendly_type, schema_props.get(name, {}))
        values = dict(
            name=name,
            description=field.field_info.description,
            type=friendly_type,
            default=default,
            required=field.required,
        )
        # TODO: If the field doesn't have description (e.g. BaseConfiguration.type), we could fallback to docstring
        # Fields without a description are silently omitted from the reference.
        if values["description"]:
            if overrides and name in overrides:
                values.update(overrides[name])
            # First type argument of the annotation; used to decide whether
            # this item links to a nested model section.
            field_type = next(iter(get_args(field.annotation)), None)
            # TODO: This is a dirty workaround
            if field_type:
                if field.annotation.__name__ == "Annotated":
                    # Unwrap one level of Optional/List/Union inside Annotated.
                    if field_type.__name__ in ["Optional", "List", "list", "Union"]:
                        field_type = get_args(field_type)[0]
                base_model = _is_linkable_type(field_type)
            else:
                base_model = False
            # "Defaults to ..." is shown for optional fields, "Must be ..."
            # for required ones (e.g. fixed `type` discriminators); both are
            # suppressed for linkable models and falsy defaults.
            _defaults = (
                f"Defaults to `{values['default']}`."
                if not base_model and values.get("default")
                else ""
            )
            _must_be = (
                f"Must be `{values['default']}`."
                if not base_model and values.get("default")
                else ""
            )
            if overrides and "item_id_prefix" in overrides:
                item_id_prefix = overrides["item_id_prefix"]
            else:
                item_id_prefix = ""
            # item_id_mapping lets a page point a field at a differently-named
            # anchor; falls back to the field name.
            if hasattr(field_type, "__name__") and overrides and "item_id_mapping" in overrides:
                link_name = overrides["item_id_mapping"].get(values["name"]) or values["name"]
            else:
                link_name = values["name"]
            # Linkable models render as [`name`](#anchor); plain fields as `name`.
            item_header = (
                f"`{values['name']}`"
                if not base_model
                else f"[`{values['name']}`](#{item_id_prefix}{link_name})"
            )
            item_required_marker = "(Required)" if values["required"] else "(Optional)"
            item_type_display = f"`{values['type']}`" if values.get("type") else ""
            item_description = (values["description"]).replace("\n", "<br>") + "."
            item_default = _defaults if not values["required"] else _must_be
            # Trailing "{ ... }" is a markdown attr-list: heading id, TOC label,
            # and CSS class for the item.
            item_id = f"#{values['name']}" if not base_model else f"#_{values['name']}"
            item_toc_label = f"data-toc-label='{values['name']}'"
            item_css_cass = "class='reference-item'"
            parts = [
                f"###### {item_header}",
                "-",
                item_required_marker,
                item_type_display,
                item_description,
                item_default,
                "{",
                item_id,
                item_toc_label,
                item_css_cass,
                "}",
            ]
            rows.append(prefix + " ".join(p for p in parts if p))
    return "\n".join(rows)
def sub_schema_reference(match: re.Match) -> str:
    """Replace one ``#SCHEMA#`` regex match with the rendered reference.

    Group 1 is the leading indent, group 2 the dotted model path, and group 3
    the optional YAML options block (each line indented by 4 spaces).
    """
    model_path = match.group(2)
    logger.debug("Generating schema reference for `%s`", model_path)
    # Strip the 4-space indent from each option line before YAML parsing.
    option_lines = [row[4:] for row in match.group(3).split("\n")]
    options = yaml.safe_load("\n".join(option_lines))
    logger.debug("Options: %s", options)
    reference = generate_schema_reference(
        model_path, **(options or {}), prefix=match.group(1)
    )
    return reference + "\n\n"
def expand_schema_references(text: str) -> str:
    """Expand #SCHEMA# placeholders in markdown text. Used by hooks when gen-files is not used."""
    # Matches: optional indent, "#SCHEMA# dstack.<path>", then zero or more
    # 4-space-indented option lines, terminated by a newline.
    placeholder = re.compile(
        r"( *)#SCHEMA#\s+(dstack\.[.a-z_0-9A-Z]+)\s*((?:\n {4}[^\n]+)*)\n"
    )
    return placeholder.sub(sub_schema_reference, text)
def process_file(file: File):
    """Expand schema placeholders in one docs page, rewriting it in place.

    Pages not matching FILE_PATTERN are left untouched. Placeholder format:

        #SCHEMA# dstack.<module>.<model>
            overrides:
              name:
                required: true
    """
    if not fnmatch(file.src_uri, FILE_PATTERN):
        return
    logger.debug("Looking for schema references in `%s`", file.src_uri)
    with mkdocs_gen_files.open(file.src_uri, "r") as f:
        original = f.read()
    expanded = expand_schema_references(original)
    with mkdocs_gen_files.open(file.src_uri, "w") as f:
        f.write(expanded)
def main():
    """Expand schema references in every file known to mkdocs-gen-files."""
    # Processing sequentially since there is no speed up with concurrent processing
    for doc_file in mkdocs_gen_files.files:
        process_file(doc_file)


# mkdocs-gen-files executes this module as a script at build time,
# so run unconditionally at import (no __main__ guard).
main()