diff --git a/README.md b/README.md
index 3c95d166d5884d2dc7c3d58c41da62f8717373b1..9cb44f9c3fcb8ec8308566cc10e529cb7f323a72 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ task_ids:
   - multi-class-classification
   - sentiment-classification
 dataset_info:
-  - config_name: lite
+  - config_name: large_lite
     features:
       - name: cik
         dtype: string
@@ -89,6 +89,12 @@ dataset_info:
                   "1": negative
       - name: filingDate
         dtype: string
+      - name: docID
+        dtype: string
+      - name: sentenceID
+        dtype: string
+      - name: sentenceCount
+        dtype: int64
     splits:
       - name: train
         num_bytes: 16424576472
@@ -101,7 +107,7 @@ dataset_info:
         num_examples: 2965174
     download_size: 13362319126
     dataset_size: 17621220293
-  - config_name: full
+  - config_name: large_full
     features:
       - name: cik
         dtype: string
@@ -155,6 +161,12 @@ dataset_info:
         dtype: string
       - name: name
         dtype: string
+      - name: docID
+        dtype: string
+      - name: sentenceID
+        dtype: string
+      - name: sentenceCount
+        dtype: int64
       - name: tickers
         list: string
       - name: exchanges
@@ -223,70 +235,6 @@ dataset_info:
         num_examples: 2965174
     download_size: 13362319126
     dataset_size: 42055510172
-  - config_name: small_lite
-    features:
-      - name: cik
-        dtype: string
-      - name: sentence
-        dtype: string
-      - name: section
-        dtype:
-          class_label:
-            names:
-              "0": section_1
-              "1": section_10
-              "2": section_11
-              "3": section_12
-              "4": section_13
-              "5": section_14
-              "6": section_15
-              "7": section_1A
-              "8": section_1B
-              "9": section_2
-              "10": section_3
-              "11": section_4
-              "12": section_5
-              "13": section_6
-              "14": section_7
-              "15": section_7A
-              "16": section_8
-              "17": section_9
-              "18": section_9A
-              "19": section_9B
-      - name: labels
-        struct:
-          - name: 1d
-            dtype:
-              class_label:
-                names:
-                  "0": positive
-                  "1": negative
-          - name: 5d
-            dtype:
-              class_label:
-                names:
-                  "0": positive
-                  "1": negative
-          - name: 30d
-            dtype:
-              class_label:
-                names:
-                  "0": positive
-                  "1": negative
-      - name: filingDate
-        dtype: string
-    splits:
-      - name: train
-        num_bytes: 48766050
-        num_examples: 200000
-      - name: validation
-        num_bytes: 5581651
-        num_examples: 20000
-      - name: test
-        num_bytes: 5036956
-        num_examples: 20000
-    download_size: 13362319126
-    dataset_size: 59384657
   - config_name: small_full
     features:
       - name: cik
@@ -298,25 +246,25 @@ dataset_info:
           class_label:
             names:
               "0": section_1
-              "1": section_10
-              "2": section_11
-              "3": section_12
-              "4": section_13
-              "5": section_14
-              "6": section_15
-              "7": section_1A
-              "8": section_1B
-              "9": section_2
-              "10": section_3
-              "11": section_4
-              "12": section_5
-              "13": section_6
-              "14": section_7
-              "15": section_7A
-              "16": section_8
-              "17": section_9
-              "18": section_9A
-              "19": section_9B
+              "1": section_1A
+              "2": section_1B
+              "3": section_2
+              "4": section_3
+              "5": section_4
+              "6": section_5
+              "7": section_6
+              "8": section_7
+              "9": section_7A
+              "10": section_8
+              "11": section_9
+              "12": section_9A
+              "13": section_9B
+              "14": section_10
+              "15": section_11
+              "16": section_12
+              "17": section_13
+              "18": section_14
+              "19": section_15
       - name: labels
         struct:
           - name: 1d
@@ -341,6 +289,12 @@ dataset_info:
         dtype: string
       - name: name
         dtype: string
+      - name: docID
+        dtype: string
+      - name: sentenceID
+        dtype: string
+      - name: sentenceCount
+        dtype: int64
       - name: tickers
         list: string
       - name: exchanges
@@ -399,16 +353,86 @@ dataset_info:
                 dtype: float32
     splits:
       - name: train
-        num_bytes: 116038677
+        num_bytes: 128731540
         num_examples: 200000
       - name: validation
-        num_bytes: 12380261
+        num_bytes: 13411689
         num_examples: 20000
       - name: test
-        num_bytes: 11898871
+        num_bytes: 13188331
         num_examples: 20000
-    download_size: 13362319126
-    dataset_size: 140317809
+    download_size: 42764380
+    dataset_size: 155331560
+  - config_name: small_lite
+    features:
+      - name: cik
+        dtype: string
+      - name: sentence
+        dtype: string
+      - name: section
+        dtype:
+          class_label:
+            names:
+              "0": section_1
+              "1": section_1A
+              "2": section_1B
+              "3": section_2
+              "4": section_3
+              "5": section_4
+              "6": section_5
+              "7": section_6
+              "8": section_7
+              "9": section_7A
+              "10": section_8
+              "11": section_9
+              "12": section_9A
+              "13": section_9B
+              "14": section_10
+              "15": section_11
+              "16": section_12
+              "17": section_13
+              "18": section_14
+              "19": section_15
+      - name: labels
+        struct:
+          - name: 1d
+            dtype:
+              class_label:
+                names:
+                  "0": positive
+                  "1": negative
+          - name: 5d
+            dtype:
+              class_label:
+                names:
+                  "0": positive
+                  "1": negative
+          - name: 30d
+            dtype:
+              class_label:
+                names:
+                  "0": positive
+                  "1": negative
+      - name: filingDate
+        dtype: string
+      - name: docID
+        dtype: string
+      - name: sentenceID
+        dtype: string
+      - name: sentenceCount
+        dtype: int64
+    splits:
+      - name: train
+        num_bytes: 60681688
+        num_examples: 200000
+      - name: validation
+        num_bytes: 6677389
+        num_examples: 20000
+      - name: test
+        num_bytes: 6351730
+        num_examples: 20000
+    download_size: 42764380
+    dataset_size: 73710807
 ---
 
-# Dataset Card for [financial-reports-sec]
+# Dataset Card for financial-reports-sec
@@ -454,7 +478,7 @@ The dataset contains the annual report of US public firms filing with the SEC ED
 
 **Four** configurations are available:
 
-- _**lite**_:
+- _**large_lite**_:
-  - Contains only the basic features needed. Extra metadata is ommitted.
+  - Contains only the basic features needed. Extra metadata is omitted.
   - Features List:
     - **cik**
@@ -462,7 +486,10 @@ The dataset contains the annual report of US public firms filing with the SEC ED
     - **section**
     - **labels**
     - **filingDate**
-- _**full**_:
+    - **docID**
+    - **sentenceID**
+    - **sentenceCount**
+- _**large_full**_:
   - All features are included.
-  - Features List (excluding those already in the lite verison above):
+  - Features List (excluding those already in the _**large_lite**_ version above):
     - **name**
@@ -477,9 +504,9 @@ The dataset contains the annual report of US public firms filing with the SEC ED
     - **reportDate**
     - **returns**
 - _**small_lite**_:
-  - Same as _**lite**_ version except that only (200,000/20,000/20,000) sentences are loaded for (train/test/validation) splits.
+  - Same as the _**large_lite**_ version, except that only 200,000/20,000/20,000 sentences are loaded for the train/validation/test splits.
 - _**small_full**_:
-  - Same as _**full**_ version except that only (200,000/20,000/20,000) sentences are loaded for (train/test/validation) splits.
+  - Same as _**large_full**_ version except that only (200,000/20,000/20,000) sentences are loaded for (train/test/validation) splits.
 
 ### Usage
 
@@ -487,10 +514,10 @@ The dataset contains the annual report of US public firms filing with the SEC ED
 import datasets
 
-# Load the lite configuration of the dataset
+# Load the large_lite configuration of the dataset
-raw_dataset = datasets.load_dataset("JanosAudran/financial-reports-sec", "lite")
+raw_dataset = datasets.load_dataset("JanosAudran/financial-reports-sec", "large_lite")
 
 # Load a specific split
-raw_dataset = datasets.load_dataset("JanosAudran/financial-reports-sec", "full", split="train")
+raw_dataset = datasets.load_dataset("JanosAudran/financial-reports-sec", "small_full", split="train")
 ```
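+
+Once loaded, the new docID field makes it easy to regroup sentences by their source filing. A minimal sketch (the `load_dataset` call is standard; the grouping is purely illustrative):
+
+```python
+import collections
+
+import datasets
+
+# The small config is enough for a quick look.
+ds = datasets.load_dataset("JanosAudran/financial-reports-sec", "small_lite", split="test")
+
+# Count how many sentences each 10-K filing contributed.
+per_doc = collections.Counter(ds["docID"])
+print(per_doc.most_common(3))
+```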
 
 ### Supported Tasks
@@ -560,6 +587,24 @@ Refer to dataset preview.
 - _string_
 - '2021-03-10'
 
+**docID**
+
+- Unique ID for identifying the exact 10-K filing. Unique across all configs and splits. Can be used to identify the document a given sentence came from.
+- _string_
+- '0000001750_10-K_2020'
+
+**sentenceID**
+
+- Unique ID for identifying the exact sentence. Unique across all configs and splits.
+- _string_
+- '0000001750_10-K_2020_section_1_100'
+
+**sentenceCount**
+
+- Integer identifying the running sequence number of the sentence. Unique **only** within a given config and split.
+- _int64_
+- 123
+
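+Both docID and sentenceID are plain underscore-joined strings assembled by the loader, so they can be decomposed when needed. A small illustrative sketch (the helper below is hypothetical, not part of the dataset):
+
+```python
+def parse_sentence_id(sentence_id: str) -> dict:
+    # '0000001750_10-K_2020_section_1_100' splits into exactly six parts,
+    # because every section name has the form 'section_<X>'.
+    cik, form, year, _section_word, section_no, idx = sentence_id.split("_")
+    return {
+        "cik": cik,                          # '0000001750'
+        "form": form,                        # '10-K'
+        "year": year,                        # '2020'
+        "section": "section_" + section_no,  # 'section_1'
+        "sentenceIdx": int(idx),             # position within the section
+    }
+
+print(parse_sentence_id("0000001750_10-K_2020_section_1_100"))
+```
+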
 **name**
 
 - The name of the filing entity
@@ -651,8 +696,8 @@ Refer to dataset preview.
 
 | Config     |      train | validation |      test |
 | ---------- | ---------: | ---------: | --------: |
-| full       | 67,316,227 |  1,585,561 | 2,965,174 |
-| lite       | 67,316,227 |  1,585,561 | 2,965,174 |
+| large_full | 67,316,227 |  1,585,561 | 2,965,174 |
+| large_lite | 67,316,227 |  1,585,561 | 2,965,174 |
 | small_full |    200,000 |     20,000 |    20,000 |
 | small_lite |    200,000 |     20,000 |    20,000 |
 
diff --git a/data/test/shard_0.jsonl b/data/large/test/shard_0.jsonl
similarity index 100%
rename from data/test/shard_0.jsonl
rename to data/large/test/shard_0.jsonl
diff --git a/data/test/shard_1.jsonl b/data/large/test/shard_1.jsonl
similarity index 100%
rename from data/test/shard_1.jsonl
rename to data/large/test/shard_1.jsonl
diff --git a/data/test/shard_2.jsonl b/data/large/test/shard_2.jsonl
similarity index 100%
rename from data/test/shard_2.jsonl
rename to data/large/test/shard_2.jsonl
diff --git a/data/test/shard_3.jsonl b/data/large/test/shard_3.jsonl
similarity index 100%
rename from data/test/shard_3.jsonl
rename to data/large/test/shard_3.jsonl
diff --git a/data/test/shard_4.jsonl b/data/large/test/shard_4.jsonl
similarity index 100%
rename from data/test/shard_4.jsonl
rename to data/large/test/shard_4.jsonl
diff --git a/data/test/shard_5.jsonl b/data/large/test/shard_5.jsonl
similarity index 100%
rename from data/test/shard_5.jsonl
rename to data/large/test/shard_5.jsonl
diff --git a/data/test/shard_6.jsonl b/data/large/test/shard_6.jsonl
similarity index 100%
rename from data/test/shard_6.jsonl
rename to data/large/test/shard_6.jsonl
diff --git a/data/test/shard_7.jsonl b/data/large/test/shard_7.jsonl
similarity index 100%
rename from data/test/shard_7.jsonl
rename to data/large/test/shard_7.jsonl
diff --git a/data/test/shard_8.jsonl b/data/large/test/shard_8.jsonl
similarity index 100%
rename from data/test/shard_8.jsonl
rename to data/large/test/shard_8.jsonl
diff --git a/data/test/shard_9.jsonl b/data/large/test/shard_9.jsonl
similarity index 100%
rename from data/test/shard_9.jsonl
rename to data/large/test/shard_9.jsonl
diff --git a/data/train/shard_0.jsonl b/data/large/train/shard_0.jsonl
similarity index 100%
rename from data/train/shard_0.jsonl
rename to data/large/train/shard_0.jsonl
diff --git a/data/train/shard_1.jsonl b/data/large/train/shard_1.jsonl
similarity index 100%
rename from data/train/shard_1.jsonl
rename to data/large/train/shard_1.jsonl
diff --git a/data/train/shard_2.jsonl b/data/large/train/shard_2.jsonl
similarity index 100%
rename from data/train/shard_2.jsonl
rename to data/large/train/shard_2.jsonl
diff --git a/data/train/shard_3.jsonl b/data/large/train/shard_3.jsonl
similarity index 100%
rename from data/train/shard_3.jsonl
rename to data/large/train/shard_3.jsonl
diff --git a/data/train/shard_4.jsonl b/data/large/train/shard_4.jsonl
similarity index 100%
rename from data/train/shard_4.jsonl
rename to data/large/train/shard_4.jsonl
diff --git a/data/train/shard_5.jsonl b/data/large/train/shard_5.jsonl
similarity index 100%
rename from data/train/shard_5.jsonl
rename to data/large/train/shard_5.jsonl
diff --git a/data/train/shard_6.jsonl b/data/large/train/shard_6.jsonl
similarity index 100%
rename from data/train/shard_6.jsonl
rename to data/large/train/shard_6.jsonl
diff --git a/data/train/shard_7.jsonl b/data/large/train/shard_7.jsonl
similarity index 100%
rename from data/train/shard_7.jsonl
rename to data/large/train/shard_7.jsonl
diff --git a/data/train/shard_8.jsonl b/data/large/train/shard_8.jsonl
similarity index 100%
rename from data/train/shard_8.jsonl
rename to data/large/train/shard_8.jsonl
diff --git a/data/train/shard_9.jsonl b/data/large/train/shard_9.jsonl
similarity index 100%
rename from data/train/shard_9.jsonl
rename to data/large/train/shard_9.jsonl
diff --git a/data/validate/shard_0.jsonl b/data/large/validate/shard_0.jsonl
similarity index 100%
rename from data/validate/shard_0.jsonl
rename to data/large/validate/shard_0.jsonl
diff --git a/data/validate/shard_1.jsonl b/data/large/validate/shard_1.jsonl
similarity index 100%
rename from data/validate/shard_1.jsonl
rename to data/large/validate/shard_1.jsonl
diff --git a/data/validate/shard_2.jsonl b/data/large/validate/shard_2.jsonl
similarity index 100%
rename from data/validate/shard_2.jsonl
rename to data/large/validate/shard_2.jsonl
diff --git a/data/validate/shard_3.jsonl b/data/large/validate/shard_3.jsonl
similarity index 100%
rename from data/validate/shard_3.jsonl
rename to data/large/validate/shard_3.jsonl
diff --git a/data/validate/shard_4.jsonl b/data/large/validate/shard_4.jsonl
similarity index 100%
rename from data/validate/shard_4.jsonl
rename to data/large/validate/shard_4.jsonl
diff --git a/data/validate/shard_5.jsonl b/data/large/validate/shard_5.jsonl
similarity index 100%
rename from data/validate/shard_5.jsonl
rename to data/large/validate/shard_5.jsonl
diff --git a/data/validate/shard_6.jsonl b/data/large/validate/shard_6.jsonl
similarity index 100%
rename from data/validate/shard_6.jsonl
rename to data/large/validate/shard_6.jsonl
diff --git a/data/validate/shard_7.jsonl b/data/large/validate/shard_7.jsonl
similarity index 100%
rename from data/validate/shard_7.jsonl
rename to data/large/validate/shard_7.jsonl
diff --git a/data/validate/shard_8.jsonl b/data/large/validate/shard_8.jsonl
similarity index 100%
rename from data/validate/shard_8.jsonl
rename to data/large/validate/shard_8.jsonl
diff --git a/data/validate/shard_9.jsonl b/data/large/validate/shard_9.jsonl
similarity index 100%
rename from data/validate/shard_9.jsonl
rename to data/large/validate/shard_9.jsonl
diff --git a/data/small/test/shard_0.jsonl b/data/small/test/shard_0.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e7c0a26efefbcfb2823c493a6c3e5a0e2f859473
--- /dev/null
+++ b/data/small/test/shard_0.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7957e14fb6f2949c95b6ba08defbd171b5ecb286fe643897dfa245ee2f6056f0
+size 2880171
diff --git a/data/small/test/shard_1.jsonl b/data/small/test/shard_1.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..ad3767a3bc3c61416f08a37a762c4b7bacc2b252
--- /dev/null
+++ b/data/small/test/shard_1.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:763be8275d4b41e77ae0bf0ed6ac5d9c4d928eed52f7ac0cd3463b82cd16d625
+size 214400
diff --git a/data/small/test/shard_2.jsonl b/data/small/test/shard_2.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..41123b2763b6c6911d37b28cffb2a41685bc8c4e
--- /dev/null
+++ b/data/small/test/shard_2.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4d1380f88c8e289925067043fe0ccc1b33aab1703f20ea23c756de9a1ef4ac2
+size 674148
diff --git a/data/small/test/shard_3.jsonl b/data/small/test/shard_3.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/test/shard_4.jsonl b/data/small/test/shard_4.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/test/shard_5.jsonl b/data/small/test/shard_5.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/test/shard_6.jsonl b/data/small/test/shard_6.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/test/shard_7.jsonl b/data/small/test/shard_7.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/test/shard_8.jsonl b/data/small/test/shard_8.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/test/shard_9.jsonl b/data/small/test/shard_9.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/train/shard_0.jsonl b/data/small/train/shard_0.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..39599a16a448ab1a6a51e3ef6b2e486231b6649f
--- /dev/null
+++ b/data/small/train/shard_0.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1c0acf006c527f9631d21f3036e78d05ffe02f7d7f1d447b89b96547577d043
+size 3647993
diff --git a/data/small/train/shard_1.jsonl b/data/small/train/shard_1.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..4ed0f5462418db05bb1be79ece78f896f8749cca
--- /dev/null
+++ b/data/small/train/shard_1.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65bd7dad82f53f990e48ea0ea0bae9ab154fede83ae6a1f33f6d077a7a83ca5f
+size 5293302
diff --git a/data/small/train/shard_2.jsonl b/data/small/train/shard_2.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d2f30e8c73806b7a10c36d53efbcb54a94818092
--- /dev/null
+++ b/data/small/train/shard_2.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77d5fcb23073e92758c626b04fb35e5dec28b5d137eb50568f67e4b8eac25cb6
+size 1330864
diff --git a/data/small/train/shard_3.jsonl b/data/small/train/shard_3.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..ffc1d100268adf01ffd53c7ba76c2e8a0266e3c5
--- /dev/null
+++ b/data/small/train/shard_3.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b467eaf7b52c44de33882327fe3c00e6b84e3adb850624089e7a8d2c7ab3a7ab
+size 2585782
diff --git a/data/small/train/shard_4.jsonl b/data/small/train/shard_4.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..dc94e590c811a9aeef55c95fb31c34fb32e49543
--- /dev/null
+++ b/data/small/train/shard_4.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2bad4dd32090852c2e762377ff27b5f627aa52043a17b8850f5ffa388cdb192
+size 2513695
diff --git a/data/small/train/shard_5.jsonl b/data/small/train/shard_5.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..5ff919964e51b09cd28d6f41b8d0312ddfd255da
--- /dev/null
+++ b/data/small/train/shard_5.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b199426dbdc113d48de6a4eeb7edfb135e6a459f6c17b629dfe8f9e890645550
+size 3196133
diff --git a/data/small/train/shard_6.jsonl b/data/small/train/shard_6.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..bb5edae928f1932ce337fe607f6f8791ff066c87
--- /dev/null
+++ b/data/small/train/shard_6.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1365acfaa3401100b121e9eb7d93abe3ceab1bf3805779a05fc37b89a2000724
+size 7294527
diff --git a/data/small/train/shard_7.jsonl b/data/small/train/shard_7.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..bb092e677d068d30b6215be3b00092f72351b494
--- /dev/null
+++ b/data/small/train/shard_7.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b44c60822b7ffbfa274874f7a4485667fe04ccc34f5027c4284660660f3c16ee
+size 4352962
diff --git a/data/small/train/shard_8.jsonl b/data/small/train/shard_8.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b4a59df0e144eadadae4d0a76a77e7359273bbb2
--- /dev/null
+++ b/data/small/train/shard_8.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d025bcbfca5c0ef99603d598b780d6a1d39a775502e666493e244f55c21c4dde
+size 4397141
diff --git a/data/small/train/shard_9.jsonl b/data/small/train/shard_9.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..ca93ea8be2e59733e8c440db08ad7ff3744fe6c3
--- /dev/null
+++ b/data/small/train/shard_9.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de791eb2c92ace08ff1683c0633c5438273a828b3d5230c034058a4ff41016db
+size 287131
diff --git a/data/small/validate/shard_0.jsonl b/data/small/validate/shard_0.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..3af8d3bf57e28c04f0190638184d2cb2a92b7b55
--- /dev/null
+++ b/data/small/validate/shard_0.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41cce5019be2ca7b0b4bd9b160d5ad55d5e4d2b04449d128442062fdb4ac8136
+size 1297470
diff --git a/data/small/validate/shard_1.jsonl b/data/small/validate/shard_1.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..437bee7739a9a82880b957e8673435f866d8e03c
--- /dev/null
+++ b/data/small/validate/shard_1.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77c5af2c2e31b45bb691e5015aeecec11e588ae55ef5148b569f617294d5d1dc
+size 1315669
diff --git a/data/small/validate/shard_2.jsonl b/data/small/validate/shard_2.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..9843d8e551bff23cdcae3897d6693434694a22eb
--- /dev/null
+++ b/data/small/validate/shard_2.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bd999afc51ca5c6cf349d4434e68f714e1ffd8484e7c0e1f0f4f3d641ef037d
+size 1482992
diff --git a/data/small/validate/shard_3.jsonl b/data/small/validate/shard_3.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/validate/shard_4.jsonl b/data/small/validate/shard_4.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/validate/shard_5.jsonl b/data/small/validate/shard_5.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/validate/shard_6.jsonl b/data/small/validate/shard_6.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/validate/shard_7.jsonl b/data/small/validate/shard_7.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/validate/shard_8.jsonl b/data/small/validate/shard_8.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/data/small/validate/shard_9.jsonl b/data/small/validate/shard_9.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/financial-reports-sec.py b/financial-reports-sec.py
index 4627d3736cf1b53dfe5800e20df7062437e35625..8d9f146b3234b018c5d6e9021cbffba150d3070a 100644
--- a/financial-reports-sec.py
+++ b/financial-reports-sec.py
@@ -27,26 +27,23 @@ _LICENSE = "apache-2.0"
 
 _NOS_SHARDS = 10
 
-_SMALL_THRESHOLD_TRAIN = 200_000
+_URLS = {
+    f"{item}_{config}": [f"data/{config}/{item}/shard_{shard}.jsonl" for shard in range(_NOS_SHARDS)]
+    for item in ["test", "train", "validate"]
+    for config in ["large", "small"]
+}
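+# Keys are of the form '<split>_<size>' (e.g. 'train_large', 'validate_small');
+# each value lists the _NOS_SHARDS shard paths for that split.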
 
-_SMALL_THRESHOLD_OTHERS = 20_000
+_REPORT_KEYS = ['section_1', 'section_1A', 'section_1B', 
+                 'section_2', 'section_3', 'section_4', 
+                 'section_5', 'section_6', 'section_7', 
+                 'section_7A', 'section_8', 'section_9', 
+                 'section_9A', 'section_9B', 'section_10', 
+                 'section_11', 'section_12', 'section_13', 
+                 'section_14', 'section_15']
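+# NOTE: this ordering is numeric (1, 1A, 1B, 2, ...) rather than the previous
+# lexicographic one (1, 10, 11, ...), so the integer values assigned to the
+# 'section' class labels differ from v1.1.0.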
 
-_URLS = {item: ["data/"+item+"/shard_"+str(shard)+".jsonl" for shard in range(_NOS_SHARDS)] for item in ['test', 'train', 'validate']}
+_LITE_FEATURES = ["cik", "sentence", "section", "labels", "filingDate", "docID", "sentenceID", "sentenceCount"]
 
 _ALL_FEATURES = {
     "cik": datasets.Value("string"),
     "sentence": datasets.Value("string"),
     "section": datasets.ClassLabel(num_classes=20, 
-                                   names=['section_1', 'section_10', 
-                                          'section_11', 'section_12', 
-                                          'section_13', 'section_14', 
-                                          'section_15', 'section_1A', 
-                                          'section_1B', 'section_2', 
-                                          'section_3', 'section_4', 
-                                          'section_5', 'section_6', 
-                                          'section_7', 'section_7A', 
-                                          'section_8', 'section_9', 
-                                          'section_9A', 'section_9B']),
+                                   names=_REPORT_KEYS),
     "labels": {
             "1d": datasets.ClassLabel(num_classes=2, names=["positive", "negative"]),
             "5d": datasets.ClassLabel(num_classes=2, names=["positive", "negative"]),
@@ -54,6 +51,9 @@ _ALL_FEATURES = {
         },
     "filingDate": datasets.Value("string"),
     "name": datasets.Value("string"), 
+    "docID": datasets.Value("string"), 
+    "sentenceID": datasets.Value("string"), 
+    "sentenceCount": datasets.Value("int64"), 
     "tickers": [datasets.Value("string")],  
     "exchanges": [datasets.Value("string")],
     "entityType": datasets.Value("string"),
@@ -91,21 +91,21 @@ _ALL_FEATURES = {
 
 class FinancialReportsSec(datasets.GeneratorBasedBuilder):
 
-    VERSION = datasets.Version("1.1.0")
+    VERSION = datasets.Version("1.1.1")
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="lite", version=VERSION, description="This returns the dataset with only the critical data needed for analysis."),
-        datasets.BuilderConfig(name="full", version=VERSION, description="This returns the dataset with all metadata included."),
+        datasets.BuilderConfig(name="large_lite", version=VERSION, description="This returns the dataset with only the critical data needed for analysis."),
+        datasets.BuilderConfig(name="large_full", version=VERSION, description="This returns the dataset with all metadata included."),
         datasets.BuilderConfig(name="small_lite", version=VERSION, description="This returns a smaller version of the dataset with only the critical data needed for analysis."),
         datasets.BuilderConfig(name="small_full", version=VERSION, description="This returns a smaller version of the dataset with all metadata included."),
     ]
 
     def _info(self):        
         
-        lite_features = datasets.Features({k: v for k, v in _ALL_FEATURES.items() if k in ["cik", "sentence", "section", "labels", "filingDate"]})
+        lite_features = datasets.Features({k: v for k, v in _ALL_FEATURES.items() if k in _LITE_FEATURES})
         full_features = datasets.Features(_ALL_FEATURES)
         
-        features = full_features if self.config.name == "full" or self.config.name == 'small_full' else lite_features
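+        # Config names follow '<size>_<variant>'; the '_lite'/'_full' suffix picks the feature set.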
+        features = full_features if self.config.name.endswith('full') else lite_features
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -116,14 +116,19 @@ class FinancialReportsSec(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        urls = _URLS
+        # The '<size>' prefix of the config name ('large' or 'small') selects the shard set.
+        size = self.config.name.split('_')[0]
+        urls = {k: v for k, v in _URLS.items() if k.endswith(size)}
+
         data_dir = dl_manager.download_and_extract(urls)
+        
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": data_dir["train"],
+                    "filepaths": data_dir["train_large"] if self.config.name.startswith('large') else data_dir["train_small"],
                     "split": "train",
                 },
             ),
@@ -131,7 +136,7 @@ class FinancialReportsSec(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": data_dir["validate"],
+                    "filepaths": data_dir["validate_large"] if self.config.name.startswith('large') else data_dir["validate_small"],
                     "split": "validate",
                 },
             ),
@@ -139,7 +144,7 @@ class FinancialReportsSec(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": data_dir["test"],
+                    "filepaths": data_dir["test_large"] if self.config.name.startswith('large') else data_dir["test_small"],
                     "split": "test"
                 },
             ),
@@ -147,35 +152,50 @@ class FinancialReportsSec(datasets.GeneratorBasedBuilder):
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepaths, split):
-        reads = 0
-        threshold = _SMALL_THRESHOLD_TRAIN if split == 'train' else _SMALL_THRESHOLD_OTHERS
-        for filepath in filepaths:
-            with open(filepath, encoding="utf-8") as f:
+        # Running sentence counter; it restarts for every config and split, so it
+        # is unique only within a given config/split combination.
+        sentenceCount = 0
+
+        # The filepaths have already been resolved for the current config and split.
+        for filepath in filepaths:
+            with open(filepath, encoding="utf-8") as f:
                 for firmIdx, row in enumerate(f):
+                    # Each row is a single firm observation.
                     data = json.loads(row)                    
+                    # Iterate the filings.
                     for filing in data["filings"]:
-                        for sec_id, section in filing["report"].items():
-                            for idx, sentence in enumerate(section):  
-                                reads += 1
-                                if self.config.name == 'small_full' or self.config.name == 'small_lite':
-                                    if reads > threshold:
-                                        return None
-                                key = data["cik"]+'_'+filing["form"]+'_'+filing["reportDate"].split('-')[0]+'_'+sec_id+'_'+str(idx)
-                                if self.config.name == "lite" or self.config.name == 'small_lite':                                    
+                        # The document ID identifies the filing itself and is shared
+                        # by every sentence generated from it.
+                        docID = data["cik"]+'_'+filing["form"]+'_'+filing["reportDate"].split('-')[0]
+                        # Iterate over _REPORT_KEYS to ensure that the order of sections
+                        # is consistent.
+                        for section_id in _REPORT_KEYS:
+                            # The small shards are cut off part-way, which leaves the
+                            # remaining sections None; a None section therefore marks
+                            # the end of the data, so stop generating entirely.
+                            if filing["report"][section_id] is None:
+                                return None
+                            # Finally, iterate the sentences in the section of the filing.
+                            for idx, sentence in enumerate(filing["report"][section_id]):
+                                sentenceCount += 1
+                                # The sentence ID extends the document ID with the
+                                # section and the sentence's index within it.
+                                key = docID+'_'+section_id+'_'+str(idx)
+                                
+                                if self.config.name.endswith('lite'):                                    
                                     yield key, {
                                         "cik": data["cik"],
                                         "sentence": sentence,
-                                        "section": sec_id,
+                                        "section": section_id,
                                         "labels": filing["labels"],
                                         "filingDate": filing["filingDate"],
+                                        "docID": data["cik"]+'_'+filing["form"]+'_'+filing["reportDate"].split('-')[0],
+                                        "sentenceID": key,
+                                        "sentenceCount": sentenceCount,
                                     }
                                 else:
                                     yield key, {
                                         "cik": data["cik"],
                                         "sentence": sentence,
-                                        "section": sec_id,
+                                        "section": section_id,
                                         "labels": filing["labels"],
                                         "filingDate": filing["filingDate"],
+                                        "docID": data["cik"]+'_'+filing["form"]+'_'+filing["reportDate"].split('-')[0],
+                                        "sentenceID": key,
+                                        "sentenceCount": sentenceCount,
                                         "name": data["name"],
                                         "tickers": data["tickers"],
                                         "exchanges": data["exchanges"],