preprocess.py 2.9 KB

import gcsfs
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import yaml

# Load preprocessing parameters from the params file tracked with the repo
with open(r"./general_params.yaml") as f:
    params = yaml.safe_load(f)

CHUNK_SIZE = params["chunk_size"]
TARGET_LABEL = params["target_col"]
PROJECT_NAME = "talos-project"
GCLOUD_CRED_ENV_VAR = "GOOGLE_APPLICATION_CREDENTIALS"
UNIQUE_FLAIRS = [
    "Discussion",
    "Project",
    "Research",
    "None",
    "News",
    "Shameless Self Promo",
    "Inaccurate",
    "Misleading",
    "Clickbait",
]

raw_df_path = "rML-raw-data.csv"
train_df_path = "rML-train.csv"
test_df_path = "rML-test.csv"
def get_remote_gs_wfs():
    print("Retrieving location of remote working file system...")
    # `dvc remote list --local` prints lines of the form "<name>\t<url>";
    # take the URL of the first configured remote.
    stream = os.popen("dvc remote list --local")
    output = stream.read()
    remote_wfs_loc = output.split("\t")[1].split("\n")[0]
    return remote_wfs_loc
def load_and_process_data(remote_wfs, random_state=42):
    fs = gcsfs.GCSFileSystem(
        project=PROJECT_NAME, token=os.environ[GCLOUD_CRED_ENV_VAR]
    )
    with fs.open(os.path.join(remote_wfs, train_df_path), "a") as train_f, fs.open(
        os.path.join(remote_wfs, test_df_path), "a"
    ) as test_f:
        print("Loading data in chunks...")
        for i, chunk in enumerate(
            pd.read_csv(os.path.join(remote_wfs, raw_df_path), chunksize=CHUNK_SIZE)
        ):
            print(f"Processing chunk {i + 1}...")
            processed_data = process(chunk)
            print("Splitting into train and test data...")
            train_chunk, test_chunk = train_test_split(
                processed_data,
                random_state=random_state,
                stratify=processed_data[TARGET_LABEL],
            )
            print("Saving to cloud...")
            save_data(train_chunk, train_f, test_chunk, test_f, i)
def process(chunk):
    df = chunk.copy()
    df = df.drop(columns=["id", "author"])
    df = df.rename(columns={"selftext": "body", "link_flair_text": "flair"})
    df["title_len"] = df.title.str.len()
    df["body_len"] = df.body.str.len()
    df["has_thumbnail"] = [
        0 if (x == "self" or x == "default") else 1 for x in df["thumbnail"]
    ]
    df = df.fillna({"body": "", "flair": "None", "body_len": 0})
    # "Discusssion" (triple s) is a known misspelling in the raw data; map it
    # to the canonical "Discussion" flair.
    df["flair"] = ["Discussion" if (x == "Discusssion") else x for x in df["flair"]]
    # Cast flair to a fixed category set before one-hot encoding, so every
    # chunk yields the same dummy columns in the same order. (get_dummies on
    # a plain Series only emits columns for the flairs present in that chunk,
    # and its `columns` argument is ignored for Series input.)
    df["flair"] = df["flair"].astype(pd.CategoricalDtype(categories=UNIQUE_FLAIRS))
    df = pd.concat([df, pd.get_dummies(df["flair"], prefix="flair")], axis=1).drop(
        ["flair"], axis=1
    )
    df["title_and_body"] = (df["title"] + " " + df["body"]).astype(str)
    return df
def save_data(train_chunk, train_f, test_chunk, test_f, i):
    # TODO: Saving is kinda slow now. Try to improve performance
    # Write the headers only once, for the first chunk
    header = i == 0
    train_chunk.to_csv(train_f, header=header, mode="a")
    test_chunk.to_csv(test_f, header=header, mode="a")
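

# A possible follow-up on the TODO above (a sketch, not part of the original
# pipeline): instead of many small remote appends, accumulate the per-chunk
# results and upload each file once at the end. This assumes the concatenated
# splits fit in memory; `save_data_buffered` and its parameters are
# hypothetical names.
def save_data_buffered(train_chunks, test_chunks, fs, remote_wfs):
    # Concatenate all processed chunks locally, then write each CSV in a
    # single upload instead of one remote write per chunk.
    train_df = pd.concat(train_chunks)
    test_df = pd.concat(test_chunks)
    with fs.open(os.path.join(remote_wfs, train_df_path), "w") as train_f:
        train_df.to_csv(train_f)
    with fs.open(os.path.join(remote_wfs, test_df_path), "w") as test_f:
        test_df.to_csv(test_f)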
if __name__ == "__main__":
    remote_wfs = get_remote_gs_wfs()
    load_and_process_data(remote_wfs)
    print("Loading and processing done!")
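
One detail in `process` worth calling out: because the raw CSV is read and encoded chunk by chunk, the one-hot columns must be identical across chunks or the rows appended to the shared CSVs would misalign. Below is a minimal, self-contained sketch of that behavior; the chunk values here are made up for illustration.

import pandas as pd

flairs = ["Discussion", "Project", "News"]
chunk_a = pd.Series(["Discussion", "News"])  # flairs seen in one chunk
chunk_b = pd.Series(["Project"])             # different flairs in another

# With a fixed category set, both chunks yield the same dummy columns in the
# same order, so appended rows line up.
dtype = pd.CategoricalDtype(categories=flairs)
a = pd.get_dummies(chunk_a.astype(dtype), prefix="flair")
b = pd.get_dummies(chunk_b.astype(dtype), prefix="flair")
assert list(a.columns) == list(b.columns)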