@@ -22,7 +22,7 @@ requires-python = ">=3.12,<3.14"
 readme = "README.md"
 license = {file = "LICENSE"}
 dependencies = [
-    "fastapi>=0.115.6",
+    "fastapi>=0.115.12",
     "uvicorn>=0.34.3",
     "kubernetes>=30.1.0",
     "llama-stack==0.2.17",
@@ -33,11 +33,12 @@ dependencies = [
     "starlette>=0.47.1",
     "aiohttp>=3.12.14",
     "authlib>=1.6.0",
-    "openai==1.99.1",
-    "sqlalchemy>=2.0.42",
     "email-validator>=2.2.0",
+    "openai==1.99.9",
+    "sqlalchemy>=2.0.42",
 ]
 
+
 [tool.pyright]
 exclude = [
     # TODO(lucasagomes): This module was copied from road-core
@@ -61,6 +62,15 @@ lightspeed-stack = "lightspeed_stack:main"
 Homepage = "https://github.com/lightspeed-core/lightspeed-stack"
 Issues = "https://github.com/lightspeed-core/lightspeed-stack/issues"
 
+# PyTorch has multiple wheel variants for different backends - cpu, gpu, etc.
+# By default, pypi.org serves the GPU variant; this forces uv to use the CPU variant.
+[[tool.uv.index]]
+name = "pytorch-cpu"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
+[tool.uv.sources]
+torch = [{ index = "pytorch-cpu" }]
+
 [dependency-groups]
 dev = [
     "black>=25.1.0",
@@ -83,23 +93,49 @@ dev = [
     "openapi-to-md>=0.1.0b2",
 ]
 llslibdev = [
-    "fastapi>=0.115.12",
-    "opentelemetry-sdk>=1.34.0",
-    "opentelemetry-exporter-otlp>=1.34.0",
-    "opentelemetry-instrumentation>=0.55b0",
+    # To check llama-stack API provider dependencies:
+    #
+    #   $ uv run llama stack list-providers
+    #
+    # API agents: inline::meta-reference
+    "matplotlib>=3.10.0",
+    "pillow>=11.1.0",
+    "pandas>=2.2.3",
+    "scikit-learn>=1.5.2",
+    "psycopg2-binary>=2.9.10",
+    # API eval: inline::meta-reference
+    "tree_sitter>=0.24.0",
+    "pythainlp>=3.0.10",
+    "langdetect>=1.0.9",
+    "emoji>=2.1.0",
+    "nltk>=3.8.1",
+    # API inference: remote::gemini
+    "litellm>=1.75.5.post1",
+    # API vector_io: inline::faiss
+    "faiss-cpu>=1.11.0",
+    # API scoring: inline::basic
+    "requests>=2.32.4",
+    # API datasetio: inline::localfs
     "aiosqlite>=0.21.0",
-    "litellm>=1.72.1",
-    "uvicorn>=0.34.3",
-    "blobfile>=3.0.0",
+    # API datasetio: remote::huggingface
     "datasets>=3.6.0",
-    "sqlalchemy>=2.0.41",
-    "faiss-cpu>=1.11.0",
+    # API telemetry: inline::meta-reference
+    "opentelemetry-sdk>=1.34.1",
+    "opentelemetry-exporter-otlp>=1.34.1",
+    # API tool_runtime: inline::rag-runtime
+    "transformers>=4.34.0",
+    "numpy==2.2.6",
+    # API tool_runtime: remote::model-context-protocol
     "mcp>=1.9.4",
+    # Other
     "autoevals>=0.0.129",
-    "psutil>=7.0.0",
-    "torch>=2.7.1",
+    "torch==2.7.1",
     "peft>=0.15.2",
     "trl>=0.18.2",
+    "fire>=0.7.0",
+    "opentelemetry-instrumentation>=0.55b0",
+    "blobfile>=3.0.0",
+    "psutil>=7.0.0",
 ]
 
 build = [
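A quick way to sanity-check the new CPU-only torch pin after applying this change (a sketch, assuming a uv release that supports [[tool.uv.index]] and dependency groups via --group):

    $ uv sync --group llslibdev
    $ uv run python -c "import torch; print(torch.__version__)"   # expect e.g. "2.7.1+cpu"

Wheels served from https://download.pytorch.org/whl/cpu carry the "+cpu" local version tag, so a bare "2.7.1" would indicate the wheel was still resolved from pypi.org.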