-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpreprocess.py
More file actions
141 lines (117 loc) · 3.42 KB
/
preprocess.py
File metadata and controls
141 lines (117 loc) · 3.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob, pickle, joblib
import zipfile, uuid, time, sys, os, json
from tqdm import tqdm
from dateutil import parser
from sklearn.preprocessing import LabelEncoder
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import random
import itertools
from collections import Counter, OrderedDict
import xlsxwriter
# Report the installed PyTorch version, check CUDA availability once, and
# select the compute device for the rest of the script.
print(torch.__version__)
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
    print("CUDA is available! Training on GPU ...")
else:
    print("CUDA is not available. Training on CPU ...")
device = torch.device("cuda" if train_on_gpu else "cpu")
def save_pickle(dataset, file_path):
    """Serialize *dataset* to *file_path* using pickle (binary mode)."""
    with open(file_path, "wb") as out_file:
        pickle.dump(dataset, out_file)
def load(file_name):
    """Deserialize and return the pickled object stored at *file_name*."""
    with open(file_name, "rb") as in_file:
        return pickle.load(in_file)
# In[20]:
def collect_ns_item_set(click_seq_test):
    """Return the distinct item ids present in *click_seq_test*, in first-seen order.

    Each row contributes its click history ("item_encode_seq") followed by
    its "target_item"; duplicates are dropped while keeping the order of
    first occurrence (the same order ``Counter(...).keys()`` produced).

    Parameters
    ----------
    click_seq_test : pandas.DataFrame
        Must have an "item_encode_seq" column (iterable of item ids per row)
        and a "target_item" column (a single item id per row).

    Returns
    -------
    list
        Unique item ids, ordered by first appearance.
    """
    # Iterate the two columns directly and flatten lazily instead of
    # positional .iloc lookups per row plus a fully materialized
    # concatenated list; dict.fromkeys deduplicates in insertion order.
    all_clicks = itertools.chain.from_iterable(
        itertools.chain(seq, [target])
        for seq, target in zip(
            click_seq_test["item_encode_seq"], click_seq_test["target_item"]
        )
    )
    return list(dict.fromkeys(all_clicks))
# Padding and fix the length of sequence
def pad(seqs, to_len, padding):
    """Left-pad *seqs* with *padding* up to *to_len*; keep only the last
    *to_len* elements when the sequence is already long enough."""
    if len(seqs) >= to_len:
        return seqs[-to_len:]
    return [padding] * (to_len - len(seqs)) + seqs
media = "AviviD_DatasetA"  # 'AviviD_DatasetA' / 'AviviD_DatasetB'
source = "Ads"
scenario = "Browse"  # 'Browse' / 'Push'

# All dataset files for this media/scenario/source combination live here.
data_dir = f"./data/{media}/{scenario}_{source}"

# Candidate pool for negative sampling: every item id that appears anywhere
# in the test click sequences (history + target).
ns_set = collect_ns_item_set(load(f"{data_dir}/click_seq_test.pkl"))

mode_ = ["train", "val", "test"]
for mode in mode_:
    df = load(f"{data_dir}/click_seq_{mode}.pkl")

    ns_ = []
    week = []
    hour = []
    delta = []
    item = []
    pop = []
    to_len = 20  # fixed sequence length after padding/truncation

    # Sample 99 negative samples from the non-click item set.
    for i in tqdm(range(df.shape[0])):
        # Membership test against a set is O(1); the previous list-based
        # check re-scanned the row's history for every candidate item,
        # making the filter quadratic in practice.
        clicked = set(df["item_encode_seq"].iloc[i])
        clicked.add(df["target_item"].iloc[i])
        res = [j for j in ns_set if j not in clicked]
        ns_.append(random.sample(res, 99))
        # Pad each per-event sequence to to_len; the padding values
        # (0 / 25 / 999999) serve as out-of-range sentinels downstream.
        week.append(pad(df["click_time_weekday_seq"].iloc[i], to_len, 0))
        hour.append(pad(df["click_time_hour_seq"].iloc[i], to_len, 25))
        delta.append(pad(df["click_time_delta(days)_seq"].iloc[i], to_len, 0))
        item.append(pad(df["item_encode_seq"].iloc[i], to_len, 999999))
        pop.append(pad(df["pop_click_list"].iloc[i], to_len, 999999))

    # NOTE: the former `df["ns_list"] = 0` placeholder was dead code — the
    # column is created by the assignment below.
    df["ns_list"] = ns_
    df["click_time_weekday_seq"] = week
    df["click_time_hour_seq"] = hour
    df["click_time_delta(days)_seq"] = delta
    df["item_encode_seq"] = item
    df["pop_click_list"] = pop

    save_pickle(df, f"{data_dir}/click_seq_{mode}_v.pkl")