preprocess.py

import os

import gcsfs  # enables pandas I/O on gs:// (Google Cloud Storage) paths
import pandas as pd
import yaml
from sklearn.model_selection import train_test_split

import reddit_utils

with open(r"./general_params.yml") as f:
    params = yaml.safe_load(f)

CHUNK_SIZE = params["chunk_size"]
TARGET_LABEL = params["target_col"]

UNIQUE_FLAIRS = [
    "Discussion",
    "Project",
    "Research",
    "None",
    "News",
    "Shameless Self Promo",
    "Inaccurate",
    "Misleading",
    "Clickbait",
]


def load_and_process_data(random_state=42):
    print("Loading data in chunks...")
    raw_data = os.path.join("raw", reddit_utils.RAW_DF_PATH)
    processed_train = os.path.join("processed", reddit_utils.TRAIN_DF_PATH)
    processed_test = os.path.join("processed", reddit_utils.TEST_DF_PATH)
    for i, chunk in enumerate(pd.read_csv(raw_data, chunksize=CHUNK_SIZE)):
        print(f"Processing chunk {i + 1}...")
        processed_data = process(chunk)
        print("Splitting into train and test data...")
        train_chunk, test_chunk = train_test_split(
            processed_data,
            random_state=random_state,
            stratify=processed_data[TARGET_LABEL],
        )
        print("Saving to cloud...")
        save_data(train_chunk, processed_train, test_chunk, processed_test, i)


def process(chunk):
    df = chunk.copy()
    df = df.drop(columns=["id", "author"])
    df = df.rename(columns={"selftext": "body", "link_flair_text": "flair"})
    df["title_len"] = df.title.str.len()
    df["body_len"] = df.body.str.len()
    # Reddit uses "self" and "default" as placeholder thumbnail values
    df["has_thumbnail"] = [
        0 if (x == "self" or x == "default") else 1 for x in df["thumbnail"]
    ]
    df = df.fillna({"body": "", "flair": "None", "body_len": 0})
    # Normalize the misspelled "Discusssion" flair that appears in the raw data
    df["flair"] = ["Discussion" if (x == "Discusssion") else x for x in df["flair"]]
    # One-hot encode the flair column
    df = pd.concat([df, pd.get_dummies(df["flair"], prefix="flair")], axis=1).drop(
        ["flair"], axis=1
    )
    # Ensure every known flair column exists, even if absent from this chunk
    for flair in UNIQUE_FLAIRS:
        flair_with_prefix = "flair_" + flair
        if flair_with_prefix not in df.columns:
            df[flair_with_prefix] = 0
    # Drop posts whose title or body was deleted or removed
    df = df[df["title"] != "[deleted by user]"]
    df = df[df["body"] != "[deleted]"]
    df = df[df["body"] != "[removed]"]
    df["title_and_body"] = (df["title"] + " " + df["body"]).astype(str)
    return df


def save_data(train_chunk, train_f, test_chunk, test_f, i):
    # We want to write the headers only once
    header = i == 0
    train_chunk.to_csv(train_f, header=header, mode="a")
    test_chunk.to_csv(test_f, header=header, mode="a")


if __name__ == "__main__":
    load_and_process_data()
    print("Loading and processing done!")
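
For context, the script expects a general_params.yml file next to it defining at least chunk_size and target_col. A minimal sketch of what it might contain; the values below are hypothetical, not taken from the repository:

    chunk_size: 100000      # hypothetical: rows per pd.read_csv chunk
    target_col: is_top_post # hypothetical: label column used for stratification

It also relies on a reddit_utils module from the same repository for the RAW_DF_PATH, TRAIN_DF_PATH, and TEST_DF_PATH constants. With both in place, the script runs as a plain entry point: python preprocess.py. Processing the CSV in chunks keeps memory bounded regardless of dataset size, and stratifying on the target column preserves the label balance within every chunk's train/test split.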