Update versions (#99)

- auto: ca3e5519e8
- hlky: 1fd28eed1e
- lstein: b40bfb5116
Committed by AbdBarho via GitHub
parent 5bbc21ea3d
commit 43a5e5e85f

@@ -47,7 +47,7 @@ RUN pip install --prefer-binary --no-cache-dir -r ${ROOT}/repositories/CodeForme
# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
ARG SHA=a2bea2f97aab6ef60afe6534611e646f66226868
ARG SHA=ca3e5519e8b6dc020c5e7ae508738afb5dc6f3ec
RUN <<EOF
cd stable-diffusion-webui
git pull --rebase
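
The two ARG SHA lines above are the caching trick the note describes: the line pinning the old SHA stays untouched so the slow install layers above it remain cached, and only the cheap RUN below re-executes when a new SHA is added. The tail of that RUN is cut off in this excerpt; a plausible shape of the update step, with ${SHA} being the build ARG declared above (sketch, not the verbatim file contents):

    cd stable-diffusion-webui
    git pull --rebase        # refresh the clone made in the earlier, cached layer
    git reset --hard ${SHA}  # assumed final step: pin the working tree to the new ARG SHA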

@@ -9,62 +9,44 @@
"font": "DejaVuSans.ttf",
"__WARNING__": "DON'T CHANGE ANYTHING BEFORE THIS",
"samples_filename_format": "",
"outdir_grids": "",
"save_to_dirs": false,
"grid_save_to_dirs": false,
"save_to_dirs_prompt_len": 10,
"samples_save": true,
"samples_format": "png",
"grid_save": true,
"return_grid": true,
"grid_format": "png",
"grid_extended_filename": false,
"grid_only_if_multiple": true,
"n_rows": -1,
"jpeg_quality": 80,
"export_for_4chan": true,
"enable_pnginfo": true,
"add_model_hash_to_info": false,
"code_former_weight": 0.5,
"directories_filename_pattern": "",
"directories_max_prompt_words": 8,
"enable_batch_seeds": true,
"enable_emphasis": true,
"save_txt": false,
"enable_pnginfo": true,
"enable_quantization": false,
"ESRGAN_tile": 192,
"ESRGAN_tile_overlap": 8,
"random_artist_categories": [],
"upscale_at_full_resolution_padding": 16,
"show_progressbar": true,
"show_progress_every_n_steps": 7,
"multiple_tqdm": true,
"export_for_4chan": true,
"face_restoration_model": null,
"code_former_weight": 0.5,
"save_images_before_face_restoration": false,
"face_restoration_unload": false,
"interrogate_keep_models_in_memory": false,
"interrogate_use_builtin_artists": true,
"interrogate_clip_num_beams": 1,
"interrogate_clip_min_length": 24,
"interrogate_clip_max_length": 48,
"interrogate_clip_dict_limit": 1500.0,
"samples_filename_pattern": "",
"directories_filename_pattern": "",
"save_selected_only": false,
"filter_nsfw": false,
"grid_extended_filename": false,
"grid_format": "png",
"grid_only_if_multiple": true,
"grid_save": true,
"grid_save_to_dirs": false,
"img2img_color_correction": false,
"img2img_fix_steps": false,
"enable_quantization": false,
"enable_batch_seeds": true,
"memmon_poll_rate": 8,
"sd_model_checkpoint": null,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"ldsr_steps": 30,
"ldsr_pre_down": 1,
"ldsr_post_down": 1,
"upscaler_for_hires_fix": null,
"interrogate_clip_dict_limit": 1500,
"interrogate_clip_max_length": 48,
"interrogate_clip_min_length": 24,
"interrogate_clip_num_beams": 1,
"interrogate_keep_models_in_memory": false,
"interrogate_use_builtin_artists": true,
"jpeg_quality": 80,
"js_modal_lightbox": true,
"js_modal_lightbox_initialy_zoomed": true,
"use_original_name_batch": false,
"directories_max_prompt_words": 8,
"ldsr_post_down": 1,
"ldsr_pre_down": 1,
"ldsr_steps": 30,
"memmon_poll_rate": 8,
"multiple_tqdm": true,
"n_rows": -1,
"outdir_grids": "",
"random_artist_categories": [],
"realesrgan_enabled_models": [
"Real-ESRGAN 4x plus",
"Real-ESRGAN 4x plus anime 6B",
@@ -73,5 +55,25 @@
"Real-ESRGAN General WDN x4x3",
"Real-ESRGAN General x4x3"
],
"samples_log_stdout": false
"return_grid": true,
"samples_filename_format": "",
"samples_filename_pattern": "",
"samples_format": "png",
"samples_log_stdout": false,
"samples_save": true,
"save_images_before_color_correction": false,
"save_images_before_face_restoration": false,
"save_selected_only": false,
"save_to_dirs": false,
"save_to_dirs_prompt_len": 10,
"save_txt": false,
"sd_model_checkpoint": null,
"show_progress_every_n_steps": 7,
"show_progressbar": true,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"upscale_at_full_resolution_padding": 16,
"upscaler_for_hires_fix": null,
"upscaler_for_img2img": null,
"use_original_name_batch": false
}
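
Most of this config.json hunk is a reshuffle rather than a settings change: the option keys are re-emitted in a different (roughly alphabetical) order, which makes the raw diff far larger than the actual delta. When comparing two revisions of a file like this, an order-insensitive diff cuts the noise; a hypothetical helper command, assuming jq is installed and the two revisions are saved as config.old.json / config.new.json:

    diff <(jq -S . config.old.json) <(jq -S . config.new.json)   # -S sorts object keys before comparing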

@@ -22,21 +22,10 @@ conda clean -a -y
EOF
# Latent diffusion
RUN <<EOF
git clone --depth 1 https://github.com/Hafiidz/latent-diffusion.git
cd latent-diffusion
# hacks all the way down
mv ldm ldm_latent &&
sed -i -- 's/from ldm/from ldm_latent/g' *.py
# dont forget to update the yaml!!
EOF
# Note: don't update the sha of previous versions because the install will take forever
# instead, update the repo state in a later step
# ARG BRANCH=master SHA=d0bb60a139d60e6c2b9be4e18e0e29a86aa5af59
ARG BRANCH=dev SHA=f585ab1923730339ea75d36b53ac1d5b6cbde500
ARG BRANCH=dev SHA=1fd28eed1ebc3aa04b9b00e2a899f3bf07f64bdc
RUN <<EOF
cd stable-diffusion
git fetch
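
The block removed above (together with the matching sed dropped from mount.sh further down) worked around a module-name collision: Hafiidz/latent-diffusion ships a top-level ldm package, just like stable-diffusion itself, so the LDSR copy was renamed to ldm_latent and every reference to it was rewritten, including the class paths in the model's project.yaml. Condensed from the removed lines (a reconstruction, not part of the new Dockerfile):

    git clone --depth 1 https://github.com/Hafiidz/latent-diffusion.git
    cd latent-diffusion
    mv ldm ldm_latent                           # dodge the clash with stable-diffusion's own ldm package
    sed -i 's/from ldm/from ldm_latent/g' *.py  # rewrite the imports in the LDSR scripts
    # and at mount time, rewrite the class paths inside the downloaded model config:
    sed 's/ldm\./ldm_latent\./g' /cache/models/LDSR.yaml > /latent-diffusion/experiments/pretrained_models/project.yaml
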
@@ -53,9 +42,9 @@ COPY . /docker/
RUN python /docker/info.py /stable-diffusion/frontend/frontend.py && chmod +x /docker/mount.sh
WORKDIR /stable-diffusion
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch PYTHONPATH="${PYTHONPATH}:/stable-diffusion" CLI_ARGS=""
ENV TRANSFORMERS_CACHE=/cache/transformers TORCH_HOME=/cache/torch PYTHONPATH="${PYTHONPATH}:${PWD}" CLI_ARGS=""
EXPOSE 7860
# run, -u to not buffer stdout / stderr
CMD /docker/mount.sh && \
python3 -u scripts/webui.py --outdir /output --ckpt /cache/models/model.ckpt --ldsr-dir /latent-diffusion ${CLI_ARGS}
python3 -u scripts/webui.py --outdir /output --ckpt /cache/models/model.ckpt ${CLI_ARGS}
# STREAMLIT_SERVER_PORT=7860 python -m streamlit run scripts/webui_streamlit.py --theme.base dark

@@ -9,8 +9,8 @@ ROOT=/stable-diffusion/src
MODELS["${ROOT}/gfpgan/experiments/pretrained_models/GFPGANv1.3.pth"]=GFPGANv1.3.pth
MODELS["${ROOT}/realesrgan/experiments/pretrained_models/RealESRGAN_x4plus.pth"]=RealESRGAN_x4plus.pth
MODELS["${ROOT}/realesrgan/experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth"]=RealESRGAN_x4plus_anime_6B.pth
MODELS["/latent-diffusion/experiments/pretrained_models/model.ckpt"]=LDSR.ckpt
# MODELS["/latent-diffusion/experiments/pretrained_models/project.yaml"]=LDSR.yaml
MODELS["${ROOT}/latent-diffusion/experiments/pretrained_models/model.ckpt"]=LDSR.ckpt
MODELS["${ROOT}/latent-diffusion/experiments/pretrained_models/project.yaml"]=LDSR.yaml
MODELS_DIR=/cache/models
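
MODELS is a bash associative array: each key is the destination path inside the image where a model is expected, each value the file name of that model under MODELS_DIR. The loop whose tail (fi/done) appears in the next hunk walks that map and symlinks cached files into place. A self-contained sketch of the pattern, with an assumed loop body (the src variable, mkdir and echo are illustrative, not the verbatim script):

    #!/bin/bash
    declare -A MODELS
    ROOT=/stable-diffusion/src
    MODELS["${ROOT}/gfpgan/experiments/pretrained_models/GFPGANv1.3.pth"]=GFPGANv1.3.pth
    MODELS_DIR=/cache/models

    for path in "${!MODELS[@]}"; do            # keys: destination paths inside the image
      src="${MODELS_DIR}/${MODELS[$path]}"     # values: file names in the shared cache
      if test -f "${src}"; then
        mkdir -p "$(dirname "${path}")"
        ln -sf "${src}" "${path}"              # mount the cached model via symlink
        echo "Mounted ${MODELS[$path]}"
      fi
    done
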
@@ -25,11 +25,6 @@ for path in "${!MODELS[@]}"; do
fi
done
# hack for latent-diffusion
if test -f "${MODELS_DIR}/LDSR.yaml"; then
sed 's/ldm\./ldm_latent\./g' "${MODELS_DIR}/LDSR.yaml" >/latent-diffusion/experiments/pretrained_models/project.yaml
fi
# force facexlib cache
mkdir -p /cache/weights/ /stable-diffusion/gfpgan/
ln -sf /cache/weights/ /stable-diffusion/gfpgan/

@@ -1,8 +1,8 @@
general:
outdir: /outputs
default_model: "Stable Diffusion v1.4"
default_model_path: "/cache/models/model.ckpt"
default_model_path: /cache/models/model.ckpt
outdir_txt2img: /outputs/txt2img-samples
outdir_img2img: /outputs/img2img-samples
optimized_turbo: true
enable_minimal_memory_usage : True
optimized: True
optimized_turbo: True
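
Two notes on this hunk: dropping the quotes around default_model_path changes nothing (a plain YAML scalar parses to the same string), and True/true are interchangeable YAML 1.1 booleans, so the capitalization churn between the removed and added lines is purely cosmetic. A quick check with PyYAML (illustrative; assumes python3 and pyyaml are available):

    python3 -c 'import yaml; print(yaml.safe_load("optimized: True\noptimized_turbo: true\ndefault_model_path: /cache/models/model.ckpt"))'
    # -> {'optimized': True, 'optimized_turbo': True, 'default_model_path': '/cache/models/model.ckpt'}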

@@ -23,7 +23,7 @@ conda clean -a -y
EOF
ARG BRANCH=development SHA=50d607ffea3734072a80e38b09ba0c3758af5d40
ARG BRANCH=development SHA=b40bfb5116b7fc618f78a0d152005ceb46153443
# this breaks on generation:
# there is a new UI anyway, but it is not by any means ready.
# ARG BRANCH=development SHA=bdbc76fcd4bd3362312dc91b087d9af66de423b1

@@ -7,12 +7,11 @@ ROOT=/stable-diffusion
mkdir -p "${ROOT}/models/ldm/stable-diffusion-v1/"
ln -sf /cache/models/model.ckpt "${ROOT}/models/ldm/stable-diffusion-v1/model.ckpt"
if test -f /cache/models/GFPGANv1.3.pth; then
base="${ROOT}/src/gfpgan/experiments/pretrained_models/"
mkdir -p "${base}"
ln -sf /cache/models/GFPGANv1.3.pth "${base}/GFPGANv1.3.pth"
echo "Mounted GFPGANv1.3.pth"
fi
base="${ROOT}/src/gfpgan/experiments/pretrained_models/"
mkdir -p "${base}"
# TODO: "real" GFPGANv1.4.pth
ln -sf /cache/models/GFPGANv1.3.pth "${base}/GFPGANv1.4.pth"
echo "Mounted GFPGANv1.3.pth"
# facexlib
FACEX_WEIGHTS=/opt/conda/lib/python3.9/site-packages/facexlib/weights
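
Note that the if test -f guard from the removed block is gone, so the GFPGANv1.4.pth link is now created even when GFPGANv1.3.pth is missing from the cache, leaving a dangling symlink. If that matters for a local setup, a guarded variant in the style of the old code would look like this (sketch, not part of this commit):

    ROOT=/stable-diffusion                        # as set at the top of mount.sh
    base="${ROOT}/src/gfpgan/experiments/pretrained_models/"
    mkdir -p "${base}"
    if test -f /cache/models/GFPGANv1.3.pth; then
      # per the TODO above, v1.3 weights stand in under the v1.4 name the fork expects
      ln -sf /cache/models/GFPGANv1.3.pth "${base}/GFPGANv1.4.pth"
      echo "Mounted GFPGANv1.3.pth"
    fi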
