""" | |
helpers for lora embeddings | |
""" | |
def get_linear_embedding_layers(model_type: str) -> list[str]:
    """
    returns the linear embedding layers needed for loras, dependent on the model arch
    """
    if model_type == "gpt_neox":
        return ["embed_in", "embed_out"]
    if model_type == "falcon":
        return ["word_embeddings", "lm_head"]
    return ["embed_tokens", "lm_head"]