-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathclass_file_structurer.py
More file actions
356 lines (298 loc) · 12.2 KB
/
class_file_structurer.py
File metadata and controls
356 lines (298 loc) · 12.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
"""
File Mapping from dataframes to File structure
########################
# F.garcia
# creation: 05.02.2025
########################
"""
import pandas as pd
import os
from rich import print
pd.options.mode.copy_on_write = True
class FileStructurer:
    """Build a nested file-structure representation from a dataframe.

    The dataframe must contain the columns "id", "filepath", "filename" and
    "size".  In the resulting structure, files are represented as tuples
    ``(filename, size, *additional)`` and folders as single-key dictionaries
    ``{folder_name: [contents]}``.
    """

    def __init__(self, df: pd.DataFrame, additional_columns: list = None):
        """Validate the dataframe and normalize its paths.

        Args:
            df (pd.DataFrame): dataframe with at least "id", "filepath",
                "filename" and "size" columns.
            additional_columns (list[str], optional): additional columns to be
                included in the per-file tuples (filename, size, additional).
                Defaults to None, which produces (filename, size) tuples.

        Raises:
            AssertionError: when "id", "filepath", "filename" or "size" is
                missing from the dataframe.
        """
        if not self.check_df_cols_is_ok(df):
            raise AssertionError('Missing columns "id", "filepath", "filename", "size" in dataframe')
        self.df = self._fix_paths_in_df(df)
        self.additional_columns = additional_columns

    @staticmethod
    def check_df_cols_is_ok(df: pd.DataFrame) -> bool:
        """Check that "id", "filepath", "filename" and "size" are in the dataframe.

        Args:
            df (pd.DataFrame): dataframe to check.

        Returns:
            bool: True if all required columns are present, False otherwise.
        """
        return all(col in df.columns for col in ("id", "filepath", "filename", "size"))

    def get_max_depth(self, df: pd.DataFrame) -> int:
        """Calculate the maximum path depth present in the dataframe.

        Args:
            df (pd.DataFrame): dataframe with a 'filepath' column.

        Returns:
            int: max depth of paths.
        """
        try:
            the_max = df['path_depth'].max()
        except KeyError:
            # 'path_depth' not computed yet — derive it on demand.
            df = self.add_depth_to_df(df)
            the_max = df['path_depth'].max()
        return int(the_max)

    def get_min_depth(self, df: pd.DataFrame) -> int:
        """Calculate the minimum path depth present in the dataframe.

        Args:
            df (pd.DataFrame): dataframe with a 'filepath' column.

        Returns:
            int: min depth of paths.
        """
        try:
            the_min = df['path_depth'].min()
        except KeyError:
            # 'path_depth' not computed yet — derive it on demand.
            df = self.add_depth_to_df(df)
            the_min = df['path_depth'].min()
        return int(the_min)

    def create_file_tuple_df(self, df: pd.DataFrame) -> pd.DataFrame:
        """Add a 'file_tuple' column of (filename, size, *additional) tuples.

        No-op when the dataframe already has a 'file_tuple' column.

        Args:
            df (pd.DataFrame): dataframe with the tuple source columns.

        Raises:
            KeyError: when a column required for the tuples is missing.

        Returns:
            pd.DataFrame: dataframe with 'file_tuple'.
        """
        df_cols = list(df.columns)
        if 'file_tuple' in df_cols:
            return df
        if isinstance(self.additional_columns, list):
            req_cols = ["filename", "size"] + self.additional_columns
        else:
            req_cols = ["filename", "size"]
        for col in req_cols:
            if col not in df_cols:
                raise KeyError(f'missing {col} required column in dataframe!')
        # Create a new column with (filename, size, *additional) tuples.
        df['file_tuple'] = df.apply(lambda row: tuple(row[col] for col in req_cols), axis=1)
        return df

    def compress_nth_file_structure(self, df: pd.DataFrame) -> pd.DataFrame:
        """Compress the deepest path level of the dataframe into the file structure.

        Rows at the maximum depth are folded into ``{folder: [contents]}``
        dictionaries one level up; all other rows are kept as-is.

        Args:
            df (pd.DataFrame): dataframe with minimum "id", "filepath",
                "filename", "size" in columns.

        Returns:
            pd.DataFrame: dataframe with the deepest path level compressed.
        """
        # Ensure the (filename, size) tuples exist.
        df = self.create_file_tuple_df(df)
        # Ensure path depths are available.
        df = self.add_depth_to_df(df)
        # Identify the current maximum depth.
        max_depth = self.get_max_depth(df)
        # For rows at max depth, split off the last folder into 'path_n'.
        df = self.add_splitted_path_n_df(df, max_depth)
        # Group shared (filepath, path_n) rows and aggregate file_tuple lists.
        df = self.group_and_aggregate_df(df)
        # Fold each group's path_n entries into folder dictionaries.
        grouped = self.get_grouped_df(df)
        # Refresh the depths on both frames after truncating paths.
        grouped = self.add_depth_to_df(grouped)
        df = self.add_depth_to_df(df)
        return self.merge_grouped_df(df, grouped)

    @staticmethod
    def add_depth_to_df(df: pd.DataFrame) -> pd.DataFrame:
        """Add a 'path_depth' column with the computed depth of each path.

        Args:
            df (pd.DataFrame): dataframe with a 'filepath' column.

        Returns:
            pd.DataFrame: dataframe with 'path_depth'.
        """
        def compute_depth(path):
            # Empty / blank paths have depth 0; otherwise count "/" segments.
            if not path or path.strip() == "":
                return 0
            return len(path.strip("/").split("/"))

        df['path_depth'] = df['filepath'].apply(compute_depth)
        return df

    @staticmethod
    def _fix_paths_in_df(df: pd.DataFrame) -> pd.DataFrame:
        """Convert path strings to a standard form for fast splitting.

        All paths are separated with a single "/" and start with "/".

        Args:
            df (pd.DataFrame): dataframe with a 'filepath' column.

        Returns:
            pd.DataFrame: dataframe with normalized 'filepath' values.
        """
        def set_standard_path(path):
            if not path or path.strip() == "":
                return ''
            # Collapse escaped backslashes and doubled slashes first.
            path = str(path).replace('\\\\', "/").replace("//", "/")
            if not str(path).startswith(('\\', "/", os.sep)):
                path = "/" + path
            # Normalize remaining separators to "/".
            return path.replace('\\', "/").replace(os.sep, "/")

        df['filepath'] = df['filepath'].apply(set_standard_path)
        return df

    @staticmethod
    def add_splitted_path_n_df(df: pd.DataFrame, max_depth) -> pd.DataFrame:
        """Separate each max-depth path into its parent path and last folder.

        Args:
            df (pd.DataFrame): dataframe with 'filepath' and 'path_depth'.
            max_depth (int): depth at which to separate the paths.

        Returns:
            pd.DataFrame: dataframe with truncated 'filepath' and new 'path_n'
                (last folder name, None for rows not at max_depth).
        """
        def split_path(path, current_depth, max_depth):
            if current_depth != max_depth:
                return path, None  # Leave untouched
            parts = path.strip("/").split("/")
            path_n = parts[-1] if len(parts) > 0 else None
            parent_path = "/" + "/".join(parts[:-1]) if len(parts) > 1 else ""
            return parent_path, path_n

        # Apply the transformation row-wise, yielding two columns.
        df[['filepath', 'path_n']] = df.apply(
            lambda row: pd.Series(split_path(row['filepath'], row['path_depth'], max_depth)),
            axis=1
        )
        return df

    @staticmethod
    def group_and_aggregate_df(df: pd.DataFrame) -> pd.DataFrame:
        """Compress rows sharing (filepath, path_n) into aggregated lists.

        Tuples and dicts are appended to the list; lists are extended so
        already-compressed content stays flat.

        Args:
            df (pd.DataFrame): dataframe with 'filepath', 'path_n', 'file_tuple'.

        Returns:
            pd.DataFrame: one row per (filepath, path_n) with list-valued
                'file_tuple'.
        """
        def smart_aggregate(series):
            result = []
            for item in series:
                if isinstance(item, tuple):
                    result.append(item)
                elif isinstance(item, dict):
                    result.append(item)
                elif isinstance(item, list):
                    # Flatten previously aggregated lists instead of nesting.
                    result.extend(item)
                else:
                    result.append(item)
            return result

        compressed_df = (
            df.groupby(['filepath', 'path_n'], dropna=False)
            .agg({'file_tuple': smart_aggregate})
            .reset_index()
        )
        return compressed_df

    @staticmethod
    def get_grouped_df(df: pd.DataFrame) -> pd.DataFrame:
        """Fold path_n entries per filepath into folder dictionaries.

        Produces one row per parent 'filepath' whose 'file_tuple' is a list of
        single-key ``{path_n: contents}`` dictionaries.

        Args:
            df (pd.DataFrame): dataframe with 'filepath', 'path_n', 'file_tuple'.

        Returns:
            pd.DataFrame: compressed dataframe with dictionary 'file_tuple'.
        """
        def convert_list(f_t):
            # Split a multi-key dict into a list of single-key dicts.
            if isinstance(f_t, dict):
                return [{key: value} for key, value in f_t.items()]
            return f_t

        # Keep only rows that carry a split-off folder name.
        filtered = df[df['path_n'].notna()]
        # Restrict to the relevant columns for safety.
        filtered = filtered[['filepath', 'path_n', 'file_tuple']].copy()
        # Build one {path_n: file_tuple} mapping per parent filepath.
        grouped = (
            filtered.groupby('filepath', group_keys=False)
            .apply(lambda g: pd.Series({'file_tuple': dict(zip(g['path_n'], g['file_tuple']))}))
            .reset_index()
        )
        grouped['file_tuple'] = grouped['file_tuple'].apply(lambda row: convert_list(row))
        return grouped

    @staticmethod
    def merge_grouped_df(df: pd.DataFrame, grouped: pd.DataFrame) -> pd.DataFrame:
        """Merge the folder rows and the direct-file rows, keeping order.

        Args:
            df (pd.DataFrame): dataframe with 'path_n' (NaN for direct files).
            grouped (pd.DataFrame): folder-dictionary dataframe.

        Returns:
            pd.DataFrame: merged dataframe, folder dictionaries first.
        """
        # Keep only rows where path_n is NaN (files living directly in filepath).
        direct_files_df = df[df['path_n'].isna()].copy()
        direct_files_df.drop(columns='path_n', inplace=True)
        # Concatenate folders first so dictionaries precede loose files.
        final_df = pd.concat([grouped, direct_files_df], ignore_index=True)
        # Reorder columns so 'filepath' comes first.
        preferred_order = ['filepath']
        remaining_cols = [col for col in final_df.columns if col not in preferred_order]
        return final_df[preferred_order + remaining_cols]

    def fully_compress(self, min_depth=1) -> pd.DataFrame:
        """Compress the dataframe iteratively until the minimum depth.

        Args:
            min_depth (int, optional): depth at which to stop compressing.
                Defaults to 1.

        Returns:
            pd.DataFrame: maximally compressed dataframe.
        """
        if self.df['filepath'].size == 0:
            return self.df
        df = self.add_depth_to_df(self.df)
        max_depth = self.get_max_depth(df)
        iteration = 1
        while max_depth > min_depth:
            print(f'{iteration} Compressing {max_depth} to {min_depth}, {df["path_depth"].size} elements left')
            df = self.compress_nth_file_structure(df)
            max_depth = self.get_max_depth(df)
            iteration += 1
        # One last compression collapses the final remaining level.
        print(f'{iteration} Compressing {max_depth} to {min_depth}, {df["path_depth"].size} elements left')
        df = self.compress_nth_file_structure(df)
        return df

    def get_file_structure(self) -> list:
        """Return the file structure of the fully compressed dataframe.

        Returns:
            list: file structure list of tuples and folder dictionaries;
                empty list for an empty dataframe.
        """
        df = self.fully_compress(1)
        try:
            if df['file_tuple'].size == 0:
                return []
        except KeyError:
            # Empty dataframe: no 'file_tuple' column was ever created.
            return []
        if df['file_tuple'].size == 1:
            # A single root row: unwrap it (or wrap under its remaining path).
            if df['filepath'].iloc[0] == '':
                return df['file_tuple'].iloc[0]
            return [{df['filepath'].iloc[0]: df['file_tuple'].iloc[0]}]
        # Multiple remaining rows: flatten their contents into one list.
        end_list = []
        for file_tup in df['file_tuple']:
            if isinstance(file_tup, list):
                end_list.extend(file_tup)
            else:
                end_list.append(file_tup)
        return end_list

    def get_file_structure_dict(self, name: str) -> dict:
        """Return the file structure wrapped in a named dictionary.

        Args:
            name (str): key under which to store the structure.

        Returns:
            dict: {name: [file structure list]}.
        """
        fs_list = self.get_file_structure()
        return {name: fs_list}