Commit 447a219
Gaeros committed, 1 parent: f6d66de

importdb fixes

Files changed (1):
  1. e6db/importdb.py +72 -64
e6db/importdb.py CHANGED
@@ -19,9 +19,10 @@ def convert_db_export_to_parquet(
     paths = get_csv_paths(dumps_path)
     out_path = dumps_path if out_path is None else Path(out_path)

+    post_parquet_paths, tag_freqs = read_posts_csv(paths["posts"], out_path)
+
     logging.info("Reading tag CSVs")
     tags, aliases, impls = read_tags_csvs(paths)
-    post_parquet_paths, tag_freqs = read_posts_csv(paths["posts"], out_path)

     logging.info("Normalizing tags")
     tags, tag2index, impl_mapped, rejtag_impls_csq_mapped = normalize_tag_list(
@@ -88,7 +89,7 @@ def read_tags_csvs(paths, alias_implications=True):
     """Reads tags, tag_aliases, tag_implications CSVs"""
     tags = pl.read_csv(
         paths["tags"],
-        dtypes=[pl.Categorical, pl.UInt8],
+        schema_overrides=[pl.Categorical, pl.UInt8],
         columns=["name", "category"],
     )

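Note: the dtypes to schema_overrides rename tracks the Polars API, which deprecated read_csv's dtypes parameter in favor of schema_overrides in recent releases. A minimal sketch of the new spelling (a positional list applies to the columns in order):

    import io
    import polars as pl

    buf = io.StringIO("name,category\ncanine,5\nfelid,1\n")
    df = pl.read_csv(buf, schema_overrides=[pl.Categorical, pl.UInt8])
    print(df.dtypes)  # [Categorical(...), UInt8]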
@@ -173,8 +174,8 @@ def normalize_tag_list(tag_freqs, tags, aliases, impls, min_freq=2, blacklist=No
             tags.lazy(), how="left", left_on="tag", right_on="name", validate="1:1"
         )
         .with_columns(col("category").fill_null(0))
-        .sort("freq", descending=True)
         .filter(col("freq") >= min_freq)
+        .sort([-col("freq").cast(pl.Int32), col("tag").cast(pl.String)])
         .collect()
     )

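Note: the sort now runs after the frequency filter and uses a compound key, which makes the tag ordering deterministic: descending frequency, with ties broken by tag name. Negating freq requires a signed cast (the column is unsigned), and casting the Categorical tag to String makes the tie-break lexicographic rather than by physical encoding. A small sketch of the idea, assuming a UInt32 freq column:

    import polars as pl
    from polars import col

    df = pl.DataFrame(
        {"tag": ["b", "c", "a"], "freq": [2, 5, 2]},
        schema_overrides={"freq": pl.UInt32},
    )
    # Descending freq via negation, ascending tag on ties.
    print(df.sort([-col("freq").cast(pl.Int32), "tag"])["tag"].to_list())
    # ['c', 'a', 'b']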
@@ -229,7 +230,7 @@ def normalize_tag_list(tag_freqs, tags, aliases, impls, min_freq=2, blacklist=No
 def read_posts_csv(
     posts_csv_path,
     out_path,
-    batch_size=1 << 18,
+    batch_size=1 << 17,
     write_parquets=True,
     rating_to_tag=True,
 ):
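Note: the default batch size drops from 1 << 18 = 262,144 to 1 << 17 = 131,072 rows, presumably to cap the memory held by each materialized chunk.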
@@ -249,7 +250,7 @@ def read_posts_csv(
         image_height=pl.Int32,
         tag_string=pl.String,
         locked_tags=pl.String,
-        fav_count=pl.UInt32,
+        fav_count=pl.UInt16,
         file_ext=pl.String,
         parent_id=pl.UInt32,
         change_seq=pl.UInt32,
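Note: fav_count is now parsed as UInt16 directly at read time instead of UInt32; the matching change two hunks down drops the now-redundant col("fav_count").cast(pl.UInt16) from columns_remaps.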
@@ -264,7 +265,7 @@ def read_posts_csv(
         is_flagged=pl.String,
         score=pl.Int16,
         up_score=pl.UInt16,
-        down_score=pl.UInt16,
+        down_score=pl.Int16,
         is_rating_locked=pl.String,
         is_status_locked=pl.String,
         is_note_locked=pl.String,
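Note: down_score is now parsed as a signed Int16; the dump evidently stores downvote totals as zero-or-negative numbers, which a strict unsigned parse would reject. The negation added in the next hunk then stores the magnitude as UInt16. A tiny illustration of why the signed dtype is needed:

    import polars as pl

    s = pl.Series("down_score", [0, -7])
    print(s.cast(pl.Int16).to_list())  # [0, -7]
    # Unsigned casts are strict and raise on negative values:
    try:
        s.cast(pl.UInt16)
    except pl.exceptions.InvalidOperationError as exc:
        print("cast failed:", exc)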
@@ -287,19 +288,26 @@ def read_posts_csv(
         "is_deleted",
         "score",
         "up_score",
+        "down_score",
     ]
+    # Conversions that can only be done after filtering
     columns_remaps = [
         col("created_at").str.to_datetime("%Y-%m-%d %H:%M:%S%.f"),
         col("md5").str.decode("hex"),
         col("image_width").cast(pl.UInt16),
         col("image_height").cast(pl.UInt16),
         col("tag_string").str.split(" "),
-        col("fav_count").cast(pl.UInt16),
         col("comment_count").cast(pl.UInt16),
         col("up_score").cast(pl.UInt16),
+        (-col("down_score")).cast(pl.UInt16),
     ]
     reader = pl.read_csv_batched(
-        posts_csv_path, columns=column_selections, dtypes=schema, batch_size=batch_size
+        posts_csv_path,
+        columns=column_selections,
+        schema_overrides=schema,
+        batch_size=batch_size,
+        low_memory=False,
+        n_threads=1,
     )

     if rating_to_tag is True:
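Note: besides the schema_overrides rename, the batched reader is now pinned to n_threads=1 with low_memory=False; presumably single-threaded parsing keeps the number of buffered chunks, and hence peak memory, predictable. A usage sketch (the file name here is hypothetical):

    import polars as pl

    reader = pl.read_csv_batched(
        "posts.csv",  # hypothetical path
        columns=["id", "tag_string"],
        schema_overrides={"id": pl.UInt32, "tag_string": pl.String},
        batch_size=1 << 17,  # a size hint in rows, not a hard limit
        low_memory=False,
        n_threads=1,
    )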
@@ -315,70 +323,70 @@ def read_posts_csv(
     parquet_paths = []
     progress = tqdm(desc=f"Reading {posts_csv_path.name}")
     while True:
-        batch = reader.next_batches(1)
-        if not batch:
+        batches = reader.next_batches(1)
+        if batches is None:
             break
-        (chunk_df,) = batch
-        del batch
-
-        chunk_df = (
-            chunk_df.lazy()
-            # Filtering
-            .filter(col("file_ext").is_in(("jpg", "png", "webp")), is_deleted="f").drop(
-                "is_deleted"
-            )
-            # Projection
-            .with_columns(*columns_remaps)
-        )
-        if isinstance(rating_to_tag, pl.DataFrame):
+        for chunk_df in batches:
             chunk_df = (
-                chunk_df.join(rating_to_tag.lazy(), how="left", on="rating")
-                .with_columns(col("tag_string").list.concat([col("rating_tag")]))
-                .drop("rating_tag")
+                chunk_df.lazy()
+                # Filtering
+                .filter(
+                    col("file_ext").is_in(("jpg", "png", "webp")), is_deleted="f"
+                ).drop("is_deleted")
+                # Projection
+                .with_columns(columns_remaps)
             )
-        chunk_df = chunk_df.with_columns(
-            col("tag_string").cast(pl.List(pl.Categorical))
-        ).collect(streaming=True)
-
-        if write_parquets:
-            parquet_path = out_path / f"posts-{batch_idx:03}.parquet"
-            parquet_paths.append(parquet_path)
-            chunk_df.write_parquet(parquet_path, compression="zstd")
-
-        # Count tag in the batch, accumulate frequencies
-        chunk_tag_freqs = (
-            chunk_df.lazy()
-            .select(tag="tag_string")
-            .explode("tag")
-            .group_by("tag")
-            .len()
-            .select("tag", freq="len")
-            .collect()
-        )
-        del chunk_df
-        if tag_freqs is None:
-            tag_freqs = chunk_tag_freqs
-        else:
-            tag_freqs = (
-                tag_freqs.lazy()
-                .join(
-                    chunk_tag_freqs.lazy(),
-                    on="tag",
-                    how="outer_coalesce",  # validate='1:1' <- needed for streaming, wth?
+            if isinstance(rating_to_tag, pl.DataFrame):
+                chunk_df = (
+                    chunk_df.join(rating_to_tag.lazy(), how="left", on="rating")
+                    .with_columns(col("tag_string").list.concat([col("rating_tag")]))
+                    .drop("rating_tag")
                 )
-                .select(
-                    "tag",
-                    freq=col("freq").fill_null(0) + col("freq_right").fill_null(0),
-                )
-                .collect(streaming=False)
+            chunk_df = chunk_df.with_columns(
+                col("tag_string").cast(pl.List(pl.Categorical))
+            ).collect(streaming=True)
+
+            if write_parquets:
+                parquet_path = out_path / f"posts-{batch_idx:03}.parquet"
+                parquet_paths.append(parquet_path)
+                chunk_df.write_parquet(parquet_path, compression="zstd")
+
+            # Count tag in the batch, accumulate frequencies
+            chunk_tag_freqs = (
+                chunk_df.lazy()
+                .select(tag="tag_string")
+                .explode("tag")
+                .group_by("tag")
+                .len()
+                .select("tag", freq="len")
+                .collect()
             )
-        del chunk_tag_freqs
+            chunk_n_posts = len(chunk_df)
+            del chunk_df
+            if tag_freqs is None:
+                tag_freqs = chunk_tag_freqs
+            else:
+                tag_freqs = (
+                    tag_freqs.lazy()
+                    .join(
+                        chunk_tag_freqs.lazy(),
+                        on="tag",
+                        how="full",  # validate='1:1' <- needed for streaming, wth?
+                        coalesce=True,
+                    )
+                    .select(
+                        "tag",
+                        freq=col("freq").fill_null(0) + col("freq_right").fill_null(0),
+                    )
+                    .collect(streaming=False)
+                )
+            del chunk_tag_freqs

-        batch_idx += 1
-        progress.update()
+            batch_idx += 1
+            progress.update(chunk_n_posts)

     progress.close()
-    return parquet_paths, tag_freqs.rechunk()
+    return parquet_paths, tag_freqs


 def post_process_posts(
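Note: two behavioral fixes in this hunk. First, BatchedCsvReader.next_batches returns a list of DataFrames, or None once the file is exhausted; the old code unpacked exactly one element and stopped on any falsy value, while the new code tests for None and iterates over whatever the reader hands back. Second, the deprecated how="outer_coalesce" join is spelled how="full" with coalesce=True in current Polars. The progress bar also now advances by rows rather than batches, and the final .rechunk() is dropped. A small sketch of the frequency accumulation with the new join spelling:

    import polars as pl
    from polars import col

    a = pl.DataFrame({"tag": ["canine", "felid"], "freq": [3, 1]})
    b = pl.DataFrame({"tag": ["canine", "avian"], "freq": [2, 5]})
    merged = (
        # coalesce=True merges the join keys into a single "tag" column
        a.join(b, on="tag", how="full", coalesce=True)
        .select("tag", freq=col("freq").fill_null(0) + col("freq_right").fill_null(0))
    )
    print(merged.sort("tag").to_dicts())
    # [{'tag': 'avian', 'freq': 5}, {'tag': 'canine', 'freq': 5}, {'tag': 'felid', 'freq': 1}]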
 