Making model backends respond to a specific type in the aiserver menu for now #373

Merged — 6 commits, merged on Jun 14, 2023
18 changes: 13 additions & 5 deletions aiserver.py
@@ -626,14 +626,20 @@ def UI_2_log_history(message):
 import importlib
 model_backend_code = {}
 model_backends = {}
+model_backend_type_crosswalk = {}
 for module in os.listdir("./modeling/inference_models"):
     if not os.path.isfile(os.path.join("./modeling/inference_models",module)) and module != '__pycache__':
         try:
             model_backend_code[module] = importlib.import_module('modeling.inference_models.{}.class'.format(module))
             model_backends[model_backend_code[module].model_backend_name] = model_backend_code[module].model_backend()
-            if 'disable' in vars(model_backends[model_backend_code[module].model_backend_name]):
-                if model_backends[model_backend_code[module].model_backend_name].disable:
-                    del model_backends[model_backend_code[module].model_backend_name]
+            if 'disable' in vars(model_backends[model_backend_code[module].model_backend_name]) and model_backends[model_backend_code[module].model_backend_name].disable:
+                del model_backends[model_backend_code[module].model_backend_name]
+            else:
+                if model_backend_code[module].model_backend_type in model_backend_type_crosswalk:
+                    model_backend_type_crosswalk[model_backend_code[module].model_backend_type].append(model_backend_code[module].model_backend_name)
+                else:
+                    model_backend_type_crosswalk[model_backend_code[module].model_backend_type] = [model_backend_code[module].model_backend_name]
+
         except Exception:
             logger.error("Model Backend {} failed to load".format(module))
             logger.error(traceback.format_exc())
@@ -6211,6 +6217,7 @@ def UI_2_load_model_button(data):
 @socketio.on('select_model')
 @logger.catch
 def UI_2_select_model(data):
+    global model_backend_type_crosswalk #No idea why I have to make this a global where I don't for model_backends...
     logger.debug("Clicked on model entry: {}".format(data))
     if data["name"] in model_menu and data['ismenu'] == "true":
         emit("open_model_load_menu", {"items": [{**item.to_json(), **{"menu":data["name"]}} for item in model_menu[data["name"]] if item.should_show()]})
@@ -6220,8 +6227,9 @@ def UI_2_select_model(data):
         valid_loaders = {}
         if data['id'] in [item.name for sublist in model_menu for item in model_menu[sublist]]:
             #Here if we have a model id that's in our menu, we explicitly use that backend
-            for model_backend in set([item.model_backend for sublist in model_menu for item in model_menu[sublist] if item.name == data['id']]):
-                valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
+            for model_backend_type in set([item.model_backend for sublist in model_menu for item in model_menu[sublist] if item.name == data['id']]):
+                for model_backend in model_backend_type_crosswalk[model_backend_type]:
+                    valid_loaders[model_backend] = model_backends[model_backend].get_requested_parameters(data["name"], data["path"] if 'path' in data else None, data["menu"])
             emit("selected_model_info", {"model_backends": valid_loaders})
         else:
             #Here we have a model that's not in our menu structure (either a custom model or a custom path
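In effect, the new model_backend_type_crosswalk groups backend display names under a shared, generic type, so a single menu entry can fan out to every compatible backend when select_model asks each one for its load parameters. A minimal standalone sketch of the pattern (the helper function and the registered names here are illustrative, not code from the repo):

    # Minimal sketch of the crosswalk pattern introduced above.
    model_backend_type_crosswalk = {}

    def register_backend(backend_type: str, backend_name: str) -> None:
        # Group every backend's display name under its generic type,
        # mirroring the if/else in the aiserver.py loader loop.
        if backend_type in model_backend_type_crosswalk:
            model_backend_type_crosswalk[backend_type].append(backend_name)
        else:
            model_backend_type_crosswalk[backend_type] = [backend_name]

    # Two backends sharing one generic type, as with the Huggingface
    # backends in this PR; "Horde" stays a type of its own:
    register_backend("Huggingface", "Huggingface")
    register_backend("Huggingface", "Huggingface MTJ")
    register_backend("Horde", "Horde")

    # A menu entry tagged with a type now resolves to all compatible backends:
    print(model_backend_type_crosswalk["Huggingface"])
    # ['Huggingface', 'Huggingface MTJ']

The same grouping could be expressed as model_backend_type_crosswalk.setdefault(backend_type, []).append(backend_name); the explicit if/else above just mirrors the shape of the diff.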
1 change: 1 addition & 0 deletions modeling/inference_models/api/class.py
@@ -19,6 +19,7 @@
 )

 model_backend_name = "KoboldAI API"
+model_backend_type = "KoboldAI API" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class APIException(Exception):
     """To be used for errors when using the Kobold API as an interface."""
1 change: 1 addition & 0 deletions modeling/inference_models/basic_api/class.py
@@ -17,6 +17,7 @@


 model_backend_name = "KoboldAI Old Colab Method"
+model_backend_type = "KoboldAI Old Colab Method" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class BasicAPIException(Exception):
     """To be used for errors when using the Basic API as an interface."""
1 change: 1 addition & 0 deletions modeling/inference_models/generic_hf_torch/class.py
@@ -23,6 +23,7 @@
 from modeling.inference_models.hf_torch import HFTorchInferenceModel

 model_backend_name = "Huggingface"
+model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class model_backend(HFTorchInferenceModel):

1 change: 1 addition & 0 deletions modeling/inference_models/gooseai/class.py
@@ -15,6 +15,7 @@
 from modeling.inference_models.openai_gooseai import model_backend as openai_gooseai_model_backend

 model_backend_name = "GooseAI"
+model_backend_type = "GooseAI" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class OpenAIAPIError(Exception):
     def __init__(self, error_type: str, error_message) -> None:
1 change: 1 addition & 0 deletions modeling/inference_models/hf_mtj/class.py
@@ -20,6 +20,7 @@
 from modeling.tokenizer import GenericTokenizer

 model_backend_name = "Huggingface MTJ"
+model_backend_type = "Huggingface" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)


 class model_backend(HFInferenceModel):
1 change: 1 addition & 0 deletions modeling/inference_models/horde/class.py
@@ -18,6 +18,7 @@
 )

 model_backend_name = "Horde"
+model_backend_type = "Horde" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class HordeException(Exception):
     """To be used for errors on server side of the Horde."""
1 change: 1 addition & 0 deletions modeling/inference_models/openai/class.py
@@ -15,6 +15,7 @@
 from modeling.inference_models.openai_gooseai import model_backend as openai_gooseai_model_backend

 model_backend_name = "OpenAI"
+model_backend_type = "OpenAI" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class OpenAIAPIError(Exception):
     def __init__(self, error_type: str, error_message) -> None:
1 change: 1 addition & 0 deletions modeling/inference_models/readonly/class.py
@@ -15,6 +15,7 @@
 )

 model_backend_name = "Read Only"
+model_backend_type = "Read Only" #This should be a generic name in case multiple model backends are compatible (think Hugging Face Custom and Basic Hugging Face)

 class BasicAPIException(Exception):
     """To be used for errors when using the Basic API as an interface."""
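Taken together, each backend module now exports a model_backend_type alongside its model_backend_name, and, as the repeated comment notes, the type is meant to stay generic so that several backends can answer for the same menu entry (only hf_mtj reuses another backend's type, "Huggingface", so far). A hypothetical new backend module would follow the same shape; everything below (module path, names, class body) is invented for illustration and is not part of this PR:

    # modeling/inference_models/my_backend/class.py (hypothetical example)

    model_backend_name = "My Backend (GPU)"  # unique display name for this loader
    model_backend_type = "My Backend"        # generic type shared with any compatible loader

    class model_backend:
        # aiserver.py drops backends whose instances set disable = True,
        # and only registers the rest in the type crosswalk.
        disable = False

        def get_requested_parameters(self, model_name, model_path, menu_path):
            # Called per backend when the user selects a menu entry; should
            # return the parameters the UI needs to ask the user for.
            return []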