Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion api/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ dependencies = [
"boto3==1.40.70",
"datasets==3.6.0",
"diffusers==0.36.0",
"do-i-have-the-vram==0.1.0",
"evaluate==0.4.3",
"fastapi-users[sqlalchemy,oauth]==15.0.1",
"hf_xet==1.1.4",
Expand Down
30 changes: 0 additions & 30 deletions api/transformerlab/routers/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,6 @@
from transformerlab.models import filesystemmodel
import transformerlab.services.job_service as job_service
from transformerlab.services.job_service import job_update_status
from transformerlab.services import vram_service
from transformerlab.schemas.vram import VramEstimateResponse
from lab.dirs import get_workspace_dir
from lab.model import Model as ModelService
from lab import storage
Expand Down Expand Up @@ -218,34 +216,6 @@ async def model_gallery(model_id: str):
return await get_model_details_from_gallery(model_id)


@router.get("/model/vram_estimate", response_model=VramEstimateResponse)
async def model_vram_estimate(
    model_id: str,
    dtype: str = "float16",
    batch: int = 1,
    seq_len: int = 4096,
    no_kv: bool = False,
    filename: str | None = None,
    x_team_id: str | None = Header(None, alias="X-Team-Id"),
    user: User = Depends(current_active_user),
) -> VramEstimateResponse:
    """Estimate the VRAM required to load/run a model.

    The `model_id` path-safe encoding uses "~~~" in place of "/"; it is
    decoded here before the lookup. Delegates the actual estimation to
    `vram_service.estimate_vram`, scoped to the requesting user and the
    optional `X-Team-Id` header.
    """
    # Decode the URL-safe model id ("~~~" stands in for "/").
    resolved_model_id = model_id.replace("~~~", "/")
    # Identify the caller for per-user scoping; None when unauthenticated.
    requester_id = None if not user else str(user.id)
    estimate = await vram_service.estimate_vram(
        model_id=resolved_model_id,
        dtype=dtype,
        batch=batch,
        seq_len=seq_len,
        no_kv=no_kv,
        filename=filename,
        user_id=requester_id,
        team_id=x_team_id,
    )
    return estimate


# Should this be a POST request?


@router.get("/model/upload_to_huggingface", summary="Given a model ID, upload it to Hugging Face.")
async def upload_model_to_huggingface(
model_id: str, model_name: str = "transformerlab-model", organization_name: str = "", model_card_data: str = "{}"
Expand Down
3 changes: 0 additions & 3 deletions api/transformerlab/routers/serverinfo.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,9 +165,6 @@ async def get_macmon_data():
@router.get("/info")
async def get_computer_information():
# start with our static system information and add current performance details
if os.environ.get("MULTIUSER", "true").lower() == "true":
return system_info

r = system_info

# Get the current disk usage if its a mac
Expand Down
24 changes: 0 additions & 24 deletions api/transformerlab/schemas/vram.py

This file was deleted.

Loading
Loading