How do I load the saved model after section 2.4 (Run Fine-Tuning Loop)?
Opened this issue · 0 comments
chunhualiao commented
The folder does not contain the usual config.json file.
[./my_model] ls
adapter_config.json README.md tokenizer_config.json tokenizer.model
adapter_model.bin special_tokens_map.json tokenizer.json training_args.bin
cat adapter_config.json
{
"auto_mapping": null,
"base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layers_pattern": null,
"layers_to_transform": null,
"lora_alpha": 16,
"lora_dropout": 0.1,
"modules_to_save": null,
"peft_type": "LORA",
"r": 64,
"revision": null,
"target_modules": [
"q_proj",
"v_proj"
],
"task_type": "CAUSAL_LM"
}