Skip to content

Bug? CHECKPOINT_URL doesn't work #2

@jochemstoel

Description

@jochemstoel

I think there might be a bug? I entered a checkpoint URL in the Dockerfile, but it tried to download a model from HuggingFace instead.

# NOTE(review): this Dockerfile is quoted from a bug report; the values below
# reproduce the reported failure and are not a recommended configuration.

# Unlikely you'll ever want to change this.
ARG FROM_IMAGE="gadicc/diffusers-api"
FROM ${FROM_IMAGE} as base
ENV FROM_IMAGE=${FROM_IMAGE}

# Added by Super Jochem
# SECURITY NOTE(review): build ARG values are recorded in the image metadata
# (`docker history`), so passing a HuggingFace token this way leaks it to
# anyone who can pull the image. Prefer BuildKit secrets or a runtime env var.
ARG HF_AUTH_TOKEN="***********************"
ENV HF_AUTH_TOKEN=${HF_AUTH_TOKEN}


# Model id, precision, etc.
# MODEL_ID "bewbs" is not a HuggingFace repo id — the error log below shows
# from_pretrained("bewbs") being attempted anyway, which is the reported bug.
ARG MODEL_ID="bewbs"
ENV MODEL_ID=${MODEL_ID}
ARG HF_MODEL_ID=""
ENV HF_MODEL_ID=${HF_MODEL_ID}
ARG MODEL_PRECISION=""
ENV MODEL_PRECISION=${MODEL_PRECISION}
ARG MODEL_REVISION=""
ENV MODEL_REVISION=${MODEL_REVISION}
#ARG MODEL_URL="s3://"
ARG MODEL_URL=""
ENV MODEL_URL=${MODEL_URL}

# To use a .ckpt file, put the details here.
# NOTE(review): despite CHECKPOINT_URL being set, the traceback shows
# download.py -> loadModel -> getScheduler -> scheduler.from_pretrained(MODEL_ID)
# resolving MODEL_ID against huggingface.co — presumably download.py ignores
# CHECKPOINT_URL on this code path; verify against download.py / loadModel.py.
ARG CHECKPOINT_URL="https://*****/models/sd/bewbs.ckpt"
ENV CHECKPOINT_URL=${CHECKPOINT_URL}
ARG CHECKPOINT_CONFIG_URL=""
ENV CHECKPOINT_CONFIG_URL=${CHECKPOINT_CONFIG_URL}

ARG PIPELINE="ALL"
ENV PIPELINE=${PIPELINE}

# AWS / S3-compatible storage (see docs)
# SECURITY NOTE(review): as with HF_AUTH_TOKEN above, credentials passed as
# build ARGs are visible in the image history.
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
# AWS, use "us-west-1" for banana; leave blank for Cloudflare R2.
ARG AWS_DEFAULT_REGION
ARG AWS_S3_DEFAULT_BUCKET
# Only if your non-AWS S3-compatible provider told you exactly what
# to put here (e.g. for Cloudflare R2, etc.)
ARG AWS_S3_ENDPOINT_URL

ENV AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
ENV AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
ENV AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
ENV AWS_S3_DEFAULT_BUCKET=${AWS_S3_DEFAULT_BUCKET}
ENV AWS_S3_ENDPOINT_URL=${AWS_S3_ENDPOINT_URL}

# Download the model
# NOTE(review): presumably RUNTIME_DOWNLOADS=0 means "fetch at build time and
# bake the weights into the image" — the RUN below is that build-time fetch,
# and it is the step that fails in the error log. Confirm against download.py.
ENV RUNTIME_DOWNLOADS=0
RUN python3 download.py

# Send (optionally signed) status updates to a REST endpoint
ARG SEND_URL="https://*******/echo"
ENV SEND_URL=${SEND_URL}
ARG SIGN_KEY
ENV SIGN_KEY=${SIGN_KEY}

# Override only if you know you need to turn this off
ARG SAFETENSORS_FAST_GPU=1
ENV SAFETENSORS_FAST_GPU=${SAFETENSORS_FAST_GPU}

CMD python3 -u server.py

Error:

{"stream":"\u001b[91m    download_model(\n  File \"/api/download.py\", line 148, in download_model\n\u001b[0m"}
{"stream":"\u001b[91m    loadModel(\n  File \"/api/loadModel.py\", line 52, in loadModel\n\u001b[0m"}
{"stream":"\u001b[91m    scheduler = getScheduler(model_id, DEFAULT_SCHEDULER, not load)\n  File \"/api/getScheduler.py\", line 88, in getScheduler\n\u001b[0m"}
{"stream":"\u001b[91m    scheduler = initScheduler(MODEL_ID, scheduler_id, download)\n  File \"/api/getScheduler.py\", line 50, in initScheduler\n\u001b[0m"}
{"stream":"\u001b[91m    inittedScheduler = scheduler.from_pretrained(\n\u001b[0m"}
{"stream":"\u001b[91m  File \"/api/diffusers/src/diffusers/schedulers/scheduling_utils.py\", line 134, in from_pretrained\n\u001b[0m"}
{"stream":"\u001b[91m    config, kwargs = cls.load_config(\n  File \"/api/diffusers/src/diffusers/configuration_utils.py\", line 341, in load_config\n\u001b[0m"}
{"stream":"\u001b[91m    raise EnvironmentError(\nOSError: bewbs is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having permission to this repo with `use_auth_token` or log in with `huggingface-cli login`.\n\u001b[0m"}
{"stream":"\u001b[91mERROR conda.cli.main_run:execute(47): `conda run /bin/bash -c python3 download.py` failed. (See above for error)\n\u001b[0m"}

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions