diff --git a/README.md b/README.md
index c91853a..829d811 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ git clone https://github.com/SillyLossy/TavernAI-extras
cd TavernAI-extras
pip install -r requirements.txt
```
-* Run `python server.py`
+* Run `python server.py --enable-modules=caption`
* Get the API URL. Defaults to `http://localhost:5100` if you run locally.
* Start TavernAI with extensions support: set `enableExtensions` to `true` in [config.conf](https://github.com/SillyLossy/TavernAI/blob/dev/config.conf)
* Navigate to TavernAI settings and put in an API URL and tap "Connect" to load the extensions
@@ -41,7 +41,7 @@ python server.py
#### Option 2 - Vanilla
* Install Python 3.10
* Run `pip install -r requirements.txt`
-* Run `python server.py`
+* Run `python server.py --enable-modules=caption`
* Get the API URL. Defaults to `http://localhost:5100` if you run locally.
* Start TavernAI with extensions support: set `enableExtensions` to `true` in [config.conf](https://github.com/SillyLossy/TavernAI/blob/dev/config.conf)
* Navigate to TavernAI settings and put in an API URL and tap "Connect" to load the extensions
@@ -222,6 +222,7 @@ File content
## Additional options
| Flag | Description |
| ------------------------ | ---------------------------------------------------------------------- |
+| `--enable-modules` | **Required option**. Provide a list of modules to enable.
Expects a comma-separated list of module names. See [Modules](#modules)
Example: `--enable-modules=caption,sd` |
| `--port` | Specify the port on which the application is hosted. Default: **5100** |
| `--listen` | Host the app on the local network |
| `--share` | Share the app on CloudFlare tunnel |
@@ -233,4 +234,3 @@ File content
| `--prompt-model` | Load a custom prompt generation model.
Expects a HuggingFace model ID.
Default: [FredZhang7/anime-anything-promptgen-v2](https://huggingface.co/FredZhang7/anime-anything-promptgen-v2) |
| `--sd-model` | Load a custom Stable Diffusion image generation model.
Expects a HuggingFace model ID.
Default: [ckpt/anything-v4.5-vae-swapped](https://huggingface.co/ckpt/anything-v4.5-vae-swapped)
*Must have VAE pre-baked in PyTorch format or the output will look drab!* |
| `--sd-cpu` | Force the Stable Diffusion generation pipeline to run on the CPU.
**SLOW!** |
-| `--enable-modules` | Override a list of enabled modules. Runs with everything enabled by default.
Expects a comma-separated list of module names. See [Modules](#modules)
Example: `--enable-modules=caption,sd` |
diff --git a/server.py b/server.py
index 86d1303..654d666 100644
--- a/server.py
+++ b/server.py
@@ -29,7 +29,7 @@ DEFAULT_CAPTIONING_MODEL = 'Salesforce/blip-image-captioning-base'
DEFAULT_KEYPHRASE_MODEL = 'ml6team/keyphrase-extraction-distilbert-inspec'
DEFAULT_PROMPT_MODEL = 'FredZhang7/anime-anything-promptgen-v2'
DEFAULT_SD_MODEL = "ckpt/anything-v4.5-vae-swapped"
-ALL_MODULES = ['caption', 'summarize', 'classify', 'keywords', 'prompt', 'sd', 'tts']
+#ALL_MODULES = ['caption', 'summarize', 'classify', 'keywords', 'prompt', 'sd', 'tts']
DEFAULT_SUMMARIZE_PARAMS = {
'temperature': 1.0,
'repetition_penalty': 1.0,
@@ -81,7 +81,12 @@ captioning_model = args.captioning_model if args.captioning_model else DEFAULT_C
keyphrase_model = args.keyphrase_model if args.keyphrase_model else DEFAULT_KEYPHRASE_MODEL
prompt_model = args.prompt_model if args.prompt_model else DEFAULT_PROMPT_MODEL
sd_model = args.sd_model if args.sd_model else DEFAULT_SD_MODEL
-modules = args.enable_modules if args.enable_modules and len(args.enable_modules) > 0 else ALL_MODULES
+modules = args.enable_modules if args.enable_modules and len(args.enable_modules) > 0 else []
+
+if len(modules) == 0:
+ print('You did not select any modules to run! Choose them by adding an --enable-modules option')
+ print('Example: --enable-modules=caption,summarize')
+ exit(1)
# Models init
device_string = "cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu"