agentlans committed
Commit 4538f23
1 Parent(s): 84f18b9

Upload folder using huggingface_hub

Llama3.1-censor-lora-16/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Llama3.1-vodka-ported2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "post_attention_layernorm",
+     "norm",
+     "input_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "gate_proj",
+     "v_proj",
+     "up_proj",
+     "lm_head",
+     "down_proj",
+     "o_proj",
+     "k_proj",
+     "embed_tokens"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-censor-lora-16/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5631330bbefbd01d5b167a62d6aad8c433d35bdc23f2a4ef6fe51402850e4534
+ size 92958096
Llama3.1-censor-lora-2/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Llama3.1-vodka-ported2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 2,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "input_layernorm",
+     "norm",
+     "post_attention_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 2,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "lm_head",
+     "v_proj",
+     "gate_proj",
+     "down_proj",
+     "o_proj",
+     "embed_tokens",
+     "k_proj",
+     "up_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-censor-lora-2/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6ff7e247ee69053e6b71cc75208f9ee2dab92ed2ae99cef0480e335dd0ee258
+ size 12144792
Llama3.1-censor-lora-32/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Llama3.1-vodka-ported2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "input_layernorm",
+     "norm",
+     "post_attention_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "k_proj",
+     "down_proj",
+     "o_proj",
+     "embed_tokens",
+     "gate_proj",
+     "v_proj",
+     "q_proj",
+     "lm_head"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-censor-lora-32/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b74ab6ea702d127eb7d289d736bea386a54db211fe2b32c9873c4298eb43c5d
+ size 185315248
Llama3.1-censor-lora-4/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Llama3.1-vodka-ported2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 4,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "post_attention_layernorm",
+     "input_layernorm",
+     "norm"
+   ],
+   "peft_type": "LORA",
+   "r": 4,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "up_proj",
+     "q_proj",
+     "down_proj",
+     "o_proj",
+     "lm_head",
+     "embed_tokens",
+     "k_proj",
+     "gate_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-censor-lora-4/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ea5bb6d0bcb593da87f4f36722a9a7d2df6d2dc93f07c54ab01e4571d4ccb4e
+ size 23689832
Llama3.1-censor-lora-64/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Llama3.1-vodka-ported2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "input_layernorm",
+     "post_attention_layernorm",
+     "norm"
+   ],
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "lm_head",
+     "o_proj",
+     "down_proj",
+     "gate_proj",
+     "up_proj",
+     "embed_tokens",
+     "k_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-censor-lora-64/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45ba1b0a56a736daabd418279d1823e653abdcc4f7fdf75c684b7726b637cfb4
+ size 370028768
Llama3.1-censor-lora-8/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Llama3.1-vodka-ported2",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 8,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "norm",
+     "input_layernorm",
+     "post_attention_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "embed_tokens",
+     "o_proj",
+     "k_proj",
+     "q_proj",
+     "lm_head",
+     "v_proj",
+     "gate_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-censor-lora-8/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5e7bd8097db6abaf1d696e309c07ce0e86db5aceed10f8ac5705348b72e3f71
+ size 46779216
Llama3.1-uncensor-lora-16/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Meta-Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "input_layernorm",
+     "norm",
+     "post_attention_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "gate_proj",
+     "embed_tokens",
+     "k_proj",
+     "up_proj",
+     "v_proj",
+     "lm_head",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-uncensor-lora-16/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a9c5ce48f223a70c9d74ba56fbd0f6d35d61002c39d786790692bb2176132eb
+ size 92958096
Llama3.1-uncensor-lora-2/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Meta-Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 2,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "input_layernorm",
+     "norm",
+     "post_attention_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 2,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "lm_head",
+     "embed_tokens",
+     "o_proj",
+     "gate_proj",
+     "down_proj",
+     "k_proj",
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-uncensor-lora-2/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b0423cf033098309f5d9cf5a7ac06d4a0dae84dbcf6ac144858ff3270fcfc8b
+ size 12144792
Llama3.1-uncensor-lora-32/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Meta-Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "post_attention_layernorm",
+     "norm",
+     "input_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "gate_proj",
+     "q_proj",
+     "embed_tokens",
+     "v_proj",
+     "k_proj",
+     "down_proj",
+     "o_proj",
+     "lm_head"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-uncensor-lora-32/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f789eca2da7273a899d7d5b1d844e5e556712857a6bf01d7f771ab4441c3435
+ size 185315248
Llama3.1-uncensor-lora-4/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Meta-Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 4,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "norm",
+     "input_layernorm",
+     "post_attention_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 4,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "down_proj",
+     "o_proj",
+     "up_proj",
+     "gate_proj",
+     "lm_head",
+     "v_proj",
+     "q_proj",
+     "embed_tokens"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-uncensor-lora-4/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6711e62da61d64d43031e8a8000bc117cb4cee84b61e5e6c355aa7e5bf35d86f
+ size 23689832
Llama3.1-uncensor-lora-64/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Meta-Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "post_attention_layernorm",
+     "norm",
+     "input_layernorm"
+   ],
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "embed_tokens",
+     "o_proj",
+     "lm_head",
+     "k_proj",
+     "q_proj",
+     "down_proj",
+     "v_proj",
+     "gate_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-uncensor-lora-64/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84502fc9ab6654c94a16bef7c9285da584cc414848cc3c437b33664b0232abf3
+ size 370028768
Llama3.1-uncensor-lora-8/adapter_config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "/drive2/Meta-Llama-3.1-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 8,
+   "lora_dropout": 0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "input_layernorm",
+     "post_attention_layernorm",
+     "norm"
+   ],
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "up_proj",
+     "gate_proj",
+     "lm_head",
+     "k_proj",
+     "q_proj",
+     "down_proj",
+     "embed_tokens",
+     "o_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
Llama3.1-uncensor-lora-8/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40121d43927e1adfafdd9132f51dc2277fa8bb604305790875e9fce9e0a5b609
+ size 46779216
README.md ADDED
@@ -0,0 +1,76 @@
+ ---
+ language:
+ - en
+ tags:
+ - llama
+ - llama-3
+ - lora
+ - content-moderation
+ - text-generation
+ license: mit
+ base_model: meta-llama/Meta-Llama-3.1-8B-Instruct
+ ---
+
+ # Llama 3.1 Censorship LoRAs
+
+ This repository contains LoRA adapters for Meta's Llama 3.1 8B Instruct model, designed for censoring and uncensoring text content.
+
+ ## What are these LoRA adapters?
+
+ These LoRA adapters serve as fine-tuning tools for the Llama 3.1 model. They capture the differences between the original, more cautious Llama 3.1 and a version that has been adjusted to be less restrictive, [agentlans/Llama3.1-vodka](https://huggingface.co/agentlans/Llama3.1-vodka). These adapters adjust how the model handles potentially sensitive content.
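+
+ As a rough illustration, an adapter might be applied with the PEFT library. This is a minimal, untested sketch that assumes the adapter folders above have been downloaded into the working directory; note that, per each `adapter_config.json`, the uncensor adapters were extracted against Meta-Llama-3.1-8B-Instruct, while the censor adapters were extracted against Llama3.1-vodka.
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ # The uncensor adapters pair with the stock instruct model (see adapter_config.json).
+ base = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+ model = AutoModelForCausalLM.from_pretrained(base, torch_dtype="auto", device_map="auto")
+ tokenizer = AutoTokenizer.from_pretrained(base)
+
+ # Local folder from this repository; rank 8 is a mid-strength choice.
+ model = PeftModel.from_pretrained(model, "Llama3.1-uncensor-lora-8")
+ ```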
+
+ ### The Basics
+
+ - **Base Model**: Llama 3.1 8B Instruct
+ - **Comparison Model**: [agentlans/Llama3.1-vodka](https://huggingface.co/agentlans/Llama3.1-vodka)
+ - **Extraction Method**: LoRA (Low-Rank Adaptation)
+
+ ### Adapter Options
+
+ Adapters are available at six ranks: 2, 4, 8, 16, 32, and 64. These can be thought of as dials determining the extent of changes to the model's behaviour.
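+
+ For intuition: a rank-r LoRA stores a low-rank update to each targeted weight matrix. In the configs above, `lora_alpha` always equals `r`, so the standard LoRA scaling factor is 1 and the rank alone sets the adapter's capacity:
+
+ ```latex
+ W' = W + \frac{\alpha}{r} B A, \qquad B \in \mathbb{R}^{d \times r},\ A \in \mathbb{R}^{r \times k}, \qquad \alpha = r \;\Rightarrow\; W' = W + B A
+ ```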
+
+ ### Applications
+
+ - Customizing Llama 3.1 for specific content needs
+ - Adjusting the model's behaviour to align more closely with the censored or uncensored variant
+ - Experimenting with various settings to identify the most effective configuration
+
+ ### Tips for Use
+
+ - Starting with lower ranks (2, 4, 8) allows for more subtle changes
+ - Higher ranks (32, 64) enable larger adjustments but require more computational resources to apply to the model
+ - Use the lowest rank that achieves the desired effect
+ - For best results, use system prompts in conjunction with the LoRAs (see the sketch after this list)
+ - Always use these adapters responsibly and ethically
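+
+ Continuing the loading sketch above, a system prompt can be combined with an adapter like this (the prompt text is purely illustrative):
+
+ ```python
+ # Illustrative system prompt; adjust it to match the censoring or uncensoring goal.
+ messages = [
+     {"role": "system", "content": "You are a helpful, family-friendly assistant."},
+     {"role": "user", "content": "Retell this scene so it is suitable for all ages."},
+ ]
+ input_ids = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+ output = model.generate(input_ids, max_new_tokens=256)
+ print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
+ ```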
+
+ ## Uses and Limitations
+
+ ### The Censor LoRAs
+
+ Designed for:
+ - Maintaining family-friendly content
+ - Removing explicit language
+ - General content moderation
+
+ ### The Uncensor LoRAs
+
+ Intended for:
+ - Restoring text that may have been excessively censored
+ - Creative writing in more mature contexts
+ - Generating realistic dialogue for adult-oriented content
+
+ ### Limitations
+
+ - These adapters may occasionally over-censor or under-censor content
+ - They should not be the sole method for content moderation; human oversight remains crucial
+ - The uncensoring adapters can generate inappropriate content and must be used with care
+
+ ## Ethical Considerations
+
+ The use of these adapters raises several ethical concerns:
+
+ - The censoring adapters may inadvertently suppress legitimate speech or artistic expression
+ - The uncensoring adapters could be misused to produce harmful or offensive content
+ - Both sets of adapters may reflect and potentially amplify societal biases present in the training data
+
+ Careful consideration of the implications of deploying these models is necessary, along with appropriate safeguards to ensure responsible usage.