-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
Expand file tree
/
Copy pathcommon.py
More file actions
440 lines (349 loc) · 13.1 KB
/
common.py
File metadata and controls
440 lines (349 loc) · 13.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
import copy
import ctypes
import datetime
import hashlib
import importlib
import json
import math
import multiprocessing
import random
import re
import secrets
import string
import sys
import time
from itertools import product

from nettacker import logger
log = logger.get_logger()
def replace_dependent_response(log, response_dependent):
    """Substitute ``response_dependent['<key>']`` placeholders inside a log string.

    Args:
        log: the log message, possibly containing placeholders such as
            ``response_dependent['key']``.
        response_dependent: mapping of placeholder keys to iterables of strings.

    Returns:
        The log string with each placeholder replaced by the space-joined
        looked-up value; failed lookups are replaced with the literal
        "response dependent error".
    """
    if str(log):
        # Parse the key out of each placeholder and look it up directly;
        # the previous eval()-based lookup executed attacker-influenced text.
        for match in re.finditer(r"response_dependent\['(\S+?)'\]", log):
            placeholder, key = match.group(0), match.group(1)
            try:
                replacement = " ".join(response_dependent[key])
            except Exception:
                # Previously this error text was itself " ".join()-ed,
                # spacing out every character; emit it verbatim instead.
                replacement = "response dependent error"
            log = log.replace(placeholder, replacement)
    return log
def merge_logs_to_list(result, log_list=None):
    """Collect every "log" value found anywhere inside a nested event dict.

    Args:
        result: an event dictionary (possibly nested); a "json_event" entry
            that is still a JSON string is decoded in place before scanning.
        log_list: optional accumulator list used during recursion.

    Returns:
        De-duplicated list of all collected log strings (order unspecified).
    """
    # The previous mutable default argument (log_list=[]) leaked collected
    # entries between unrelated calls.
    if log_list is None:
        log_list = []
    if isinstance(result, dict):
        if "json_event" in result and not isinstance(result["json_event"], dict):
            result["json_event"] = json.loads(result["json_event"])
        for key in result:
            if key == "log":
                log_list.append(result["log"])
            else:
                merge_logs_to_list(result[key], log_list)
    return list(set(log_list))
def reverse_and_regex_condition(regex, reverse):
    """Apply a module's "reverse" flag to a collection of regex findings.

    With findings: return them de-duplicated, or [] when reversed.
    Without findings: return True when reversed, otherwise [].
    """
    if reverse:
        return [] if regex else True
    return list(set(regex)) if regex else []
def wait_for_threads_to_finish(threads, maximum=None, terminable=False, sub_process=False):
    """Block until the monitored threads finish or drop below *maximum*.

    Args:
        threads: list of Thread (or Process) objects; finished entries are
            pruned from the list in place on every poll.
        maximum: when set, stop waiting once fewer than *maximum* remain.
        terminable: on KeyboardInterrupt, forcibly terminate survivors.
        sub_process: on KeyboardInterrupt, kill surviving sub-processes.

    Returns:
        True when the wait completed normally, False when interrupted.
    """
    while threads:
        try:
            # Prune dead workers in place so the caller's list stays current.
            threads[:] = [worker for worker in threads if worker.is_alive()]
            if maximum and len(threads) < maximum:
                break
            time.sleep(0.01)
        except KeyboardInterrupt:
            if terminable:
                for worker in threads:
                    terminate_thread(worker)
            if sub_process:
                for worker in threads:
                    worker.kill()
            return False
    return True
def terminate_thread(thread, verbose=True):
    """Forcibly stop a thread by injecting SystemExit into it.

    Technique from https://stackoverflow.com/a/15274929

    Args:
        thread: the thread to kill.
        verbose: when True, log the thread name before killing it.

    Returns:
        True once the exception was injected, None if the thread was
        already dead.

    Raises:
        ValueError: the thread id does not exist.
        SystemError: the injection affected more than one thread.
    """
    if verbose:
        log.info("killing {0}".format(thread.name))
    if not thread.is_alive():
        return
    affected = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(thread.ident), ctypes.py_object(SystemExit)
    )
    if affected == 0:
        raise ValueError("nonexistent thread id")
    if affected > 1:
        # More than one thread state was modified: revert with exc=NULL
        # before reporting the failure, per the C-API contract.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
    return True
def get_http_header_key(http_header):
    """Extract the header name from a raw "-H" header string.

    Args:
        http_header: the full header string supplied by the user.

    Returns:
        The stripped part before the first ":"; the whole stripped string
        when there is no ":"; an empty string for empty input.

    Example:
        "Authorization: Bearer abcdefgh" -> "Authorization"
    """
    name, _, _ = http_header.partition(":")
    return name.strip()
def get_http_header_value(http_header):
    """Extract the header value from a raw "-H" header string.

    Args:
        http_header: the full header string supplied by the user.

    Returns:
        The stripped part after the first ":", or None when the input is
        empty, contains no ":", or the value part is blank.

    Example:
        "Authorization: Bearer abcdefgh" -> "Bearer abcdefgh"
    """
    if not http_header or ":" not in http_header:
        return None
    _, _, raw_value = http_header.partition(":")
    return raw_value.strip() or None
def remove_sensitive_header_keys(event):
    """Strip user-supplied sensitive headers from a JSON event.

    Args:
        event: event dictionary that may contain a "headers" mapping.

    Returns:
        The same event object with sensitive header keys deleted in place;
        non-dict events (or non-dict headers) are returned unchanged.
    """
    from nettacker.config import sensitive_headers

    if not isinstance(event, dict):
        return event
    if "headers" in event:
        headers = event["headers"]
        if not isinstance(headers, dict):
            return event
        # Snapshot the keys so deletion during iteration is safe.
        for header_name in list(headers):
            if header_name.lower() in sensitive_headers:
                del headers[header_name]
    return event
def find_args_value(args_name):
    """Return the command-line token following *args_name* in sys.argv,
    or None when the flag is absent or has nothing after it."""
    try:
        flag_position = sys.argv.index(args_name)
        return sys.argv[flag_position + 1]
    except (ValueError, IndexError):
        return None
def set_nested_value(d, key_path, value):
    """Assign *value* inside nested dict *d* at the "/"-separated *key_path*.

    Empty path components (leading/trailing/doubled slashes) are ignored;
    every intermediate key must already exist in the nesting.
    """
    *parents, leaf = [part for part in key_path.split("/") if part]
    target = d
    for part in parents:
        target = target[part]
    target[leaf] = value
def generate_new_sub_steps(sub_steps, data_matrix, arrays):
    """Expand a sub-step template into one concrete copy per matrix row.

    Args:
        sub_steps: the step template to fill in.
        data_matrix: rows of values, one value per key path in *arrays*.
        arrays: mapping of "/"-separated key paths to their value arrays;
            only its key order is used here.

    Returns:
        List of deep-copied sub-step dicts, one per row of *data_matrix*.
    """
    template = copy.deepcopy(sub_steps)
    key_paths = list(arrays)
    expanded_steps = []
    for row in data_matrix:
        for key_path, cell in zip(key_paths, row):
            set_nested_value(template, key_path, cell)
        expanded_steps.append(copy.deepcopy(template))
    return expanded_steps
def find_repeaters(sub_content, root, arrays):
    """Walk a step definition and collect every "repeater" found inside it.

    A repeater is either a list value or a dict containing the
    "nettacker_fuzzer" key; each one is recorded in *arrays* under its
    "/"-separated key path.

    Args:
        sub_content: the (sub-)structure currently being inspected
        root: "/"-terminated key path of *sub_content* within the step
        arrays: accumulator mapping key paths to repeater values

    Returns:
        (sub_content, root, arrays) while recursing; at the top level
        (root == "") only the accumulated *arrays* mapping.
    """
    if isinstance(sub_content, dict) and "nettacker_fuzzer" not in sub_content:
        temporary_content = copy.deepcopy(sub_content)
        original_root = root
        for key in sub_content:
            # Rebuild the child's path from the parent's path each turn.
            root = original_root
            root += key + "/"
            temporary_content[key], _root, arrays = find_repeaters(sub_content[key], root, arrays)
        sub_content = copy.deepcopy(temporary_content)
        root = original_root
    # NOTE(review): for string values the "in" below is a substring test —
    # presumably intentional, since plain strings never contain the marker.
    if (not isinstance(sub_content, (bool, int, float))) and (
        isinstance(sub_content, list) or "nettacker_fuzzer" in sub_content
    ):
        arrays[root] = sub_content
    return (sub_content, root, arrays) if root != "" else arrays
class value_to_class:
    """Wrapper that tags a raw value so it can be carried through matrix
    generation and unwrapped again afterwards via its .value attribute."""

    def __init__(self, value):
        # The wrapped raw value, exposed unchanged.
        self.value = value
def class_to_value(arrays):
    """Return a deep copy of *arrays* with every value_to_class wrapper
    replaced by the raw value it carries.

    Args:
        arrays: list of lists that may contain value_to_class instances.

    Returns:
        The unwrapped copy; non-wrapper entries are left untouched.
    """
    unwrapped = copy.deepcopy(arrays)
    for row_index, row in enumerate(arrays):
        for column_index, cell in enumerate(row):
            if isinstance(cell, value_to_class):
                unwrapped[row_index][column_index] = cell.value
    return unwrapped
def generate_and_replace_md5(content):
    """Replace a NETTACKER_MD5_GENERATOR_START..STOP section with the MD5
    hex digest of the text between the markers.

    Args:
        content: payload string that may contain one marker pair.

    Returns:
        The content with the marked section (markers included) replaced by
        the digest; unchanged when no marker pair is present.
    """
    start_marker = "NETTACKER_MD5_GENERATOR_START"
    stop_marker = "NETTACKER_MD5_GENERATOR_STOP"
    # Previously a missing marker raised IndexError; treat it as a no-op.
    if start_marker not in content or stop_marker not in content:
        return content
    md5_content = content.split(start_marker)[1].split(stop_marker)[0]
    # split() on a str always yields str, so encode unconditionally.
    md5_hash = hashlib.md5(md5_content.encode()).hexdigest()
    return content.replace(start_marker + md5_content + stop_marker, md5_hash)
def generate_target_groups(targets, set_hardware_usage):
    """Split *targets* into at most *set_hardware_usage* similar-size chunks.

    Args:
        targets: list of targets to distribute.
        set_hardware_usage: maximum number of groups (derived from CPU usage).

    Returns:
        List of target sublists; the input list itself when it is empty.
    """
    if not targets:
        return targets
    targets_total = len(targets)
    # Clamp to >= 1: a non-positive usage value previously caused a
    # ZeroDivisionError below.
    split_size = max(min(set_hardware_usage, targets_total), 1)
    # Ceiling division so the remainder is absorbed by the last chunk.
    chunk_size = (targets_total + split_size - 1) // split_size
    return [targets[i : i + chunk_size] for i in range(0, targets_total, chunk_size)]
def arrays_to_matrix(arrays):
    """Build the Cartesian product of the value arrays as a list of lists.

    Args:
        arrays: mapping whose values are the arrays to combine; key order
            determines column order.

    Returns:
        Every combination, one list per row.
    """
    return [list(combination) for combination in product(*arrays.values())]
def string_to_bytes(string):
    """Encode *string* into its UTF-8 bytes representation."""
    return string.encode("utf-8")
# Whitelist of fuzzer data keys and the loader functions each one may
# invoke via apply_data_functions(); anything not listed here is ignored.
AVAILABLE_DATA_FUNCTIONS = {
    "passwords": {"read_from_file"},
    "paths": {"read_from_file"},
    "urls": {"read_from_file"},
}
def fuzzer_function_read_file_as_array(filename):
    """Read a payload file and return its content split into lines.

    Args:
        filename: path relative to the configured payloads directory.

    Returns:
        The file's text split on "\\n" (a trailing newline therefore yields
        a final empty string, matching the previous behavior).
    """
    from nettacker.config import PathConfig

    # "with" ensures the handle is closed; the previous open().read()
    # leaked the file descriptor until garbage collection.
    with open(PathConfig().payloads_dir / filename) as payloads_file:
        return payloads_file.read().split("\n")
def apply_data_functions(data):
    """Run the whitelisted loader functions referenced inside fuzzer data.

    Args:
        data: mapping of data names to either literal arrays or
            {function_name: argument} loader specs.

    Returns:
        Deep copy of *data* with each whitelisted loader spec replaced by
        the loader's result; everything else passes through untouched.
    """
    processed_data = copy.deepcopy(data)
    for item in data:
        if item not in AVAILABLE_DATA_FUNCTIONS:
            continue
        for fn_name in data[item]:
            if fn_name in AVAILABLE_DATA_FUNCTIONS[item]:
                # getattr without a default raises AttributeError for a
                # missing function, so the old "fn is not None" check was
                # unreachable and has been dropped.
                fn = getattr(importlib.import_module("nettacker.core.fuzzer"), fn_name)
                processed_data[item] = fn(data[item][fn_name])
    return processed_data
# Interceptor names that fuzzer configs may reference; restricting lookups
# to this mapping prevents configs from invoking arbitrary functions.
ALLOWED_INTERCEPTORS = {
    "generate_and_replace_md5": generate_and_replace_md5,
}
def fuzzer_repeater_perform(arrays):
    """Expand every "nettacker_fuzzer" spec found in *arrays*.

    For each entry carrying a "nettacker_fuzzer" spec, the data arrays are
    combined into a matrix, rendered through the spec's input_format,
    optionally passed through whitelisted interceptors, wrapped with the
    prefix/suffix, and the entry is replaced by the resulting payload list.

    Args:
        arrays: mapping of key paths to repeater definitions.

    Returns:
        Deep copy of *arrays* with fuzzer specs replaced by payload lists.

    Raises:
        ValueError: a spec references an interceptor outside the whitelist.
    """
    expanded_arrays = copy.deepcopy(arrays)
    for array_name in arrays:
        if "nettacker_fuzzer" not in arrays[array_name]:
            continue
        fuzzer_spec = arrays[array_name]["nettacker_fuzzer"]
        data = fuzzer_spec["data"]
        data_matrix = arrays_to_matrix(apply_data_functions(data))
        prefix = fuzzer_spec["prefix"]
        suffix = fuzzer_spec["suffix"]
        input_format = fuzzer_spec["input_format"]
        interceptors = copy.deepcopy(fuzzer_spec["interceptors"])
        if interceptors:
            interceptors = interceptors.split(",")
        payloads = []
        for row in data_matrix:
            # Pair each matrix value with its data key, in key order.
            formatted_data = dict(zip(data, row))
            payload = input_format.format(**formatted_data)
            for interceptor in interceptors or []:
                if interceptor not in ALLOWED_INTERCEPTORS:
                    raise ValueError(f"Interceptor '{interceptor}' is not allowed")
                payload = ALLOWED_INTERCEPTORS[interceptor](payload)
            if prefix:
                payload = prefix + payload
            if suffix:
                payload = payload + suffix
            payloads.append(copy.deepcopy(payload))
        expanded_arrays[array_name] = payloads
    return expanded_arrays
def expand_module_steps(content):
    """Return a deep copy of module *content* with every protocol's steps
    expanded through the fuzzer/repeater machinery."""
    return [expand_protocol(protocol) for protocol in copy.deepcopy(content)]
def expand_protocol(protocol):
    """Expand each step of *protocol* in place and return the protocol."""
    protocol["steps"] = [expand_step(step) for step in protocol["steps"]]
    return protocol
def expand_step(step):
    """Expand one step template into its list of concrete sub-steps.

    Returns the generated sub-steps when repeaters are present in *step*,
    otherwise the single original step wrapped in a list.
    """
    arrays = fuzzer_repeater_perform(find_repeaters(step, "", {}))
    if not arrays:
        # No repeaters found: a step always expands to at least itself.
        return [step]
    return generate_new_sub_steps(step, class_to_value(arrays_to_matrix(arrays)), arrays)
def generate_random_token(length=10):
    """Generate a random lowercase ASCII token.

    Args:
        length: number of characters to generate (default 10).

    Returns:
        A random string of lowercase letters drawn from a cryptographically
        secure source (random.choice is predictable and unsuitable for
        anything token-like).
    """
    return "".join(secrets.choice(string.ascii_lowercase) for _ in range(length))
def now(format="%Y-%m-%d %H:%M:%S"):
    """Return the current local date/time rendered with *format*.

    Args:
        format: strftime pattern, defaults to "%Y-%m-%d %H:%M:%S".

    Returns:
        The formatted timestamp string.
    """
    current_moment = datetime.datetime.now()
    return current_moment.strftime(format)
def select_maximum_cpu_core(mode):
    """Map a hardware-usage mode name to a worker-process count.

    Args:
        mode: one of "maximum", "high", "normal", "low"; anything else
            falls back to a single core.

    Returns:
        At least 1; "high" rounds its share up, every other mode rounds down.
    """
    cpu_count = multiprocessing.cpu_count()
    if cpu_count == 1:
        return 1
    mode_shares = {
        "maximum": cpu_count - 1,
        "high": cpu_count / 2,
        "normal": cpu_count / 4,
        "low": cpu_count / 8,
    }
    share = mode_shares.get(mode, 1)
    rounded_share = math.ceil(share) if mode == "high" else math.floor(share)
    return max(int(rounded_share), 1)
def sort_dictionary(dictionary):
    """Return a copy of *dictionary* with its keys in sorted order.

    A "..." placeholder key is removed from the input (in place!) and
    re-appended last with an empty dict value.
    """
    has_ellipsis = "..." in dictionary
    if has_ellipsis:
        del dictionary["..."]
    ordered = {key: dictionary[key] for key in sorted(dictionary)}
    if has_ellipsis:
        ordered["..."] = {}
    return ordered
def sanitize_path(path):
    """Sanitize a user-supplied file path to prevent unauthorized access.

    Args:
        path: the file path provided by the user.

    Returns:
        The safe components (alphanumerics, "_", "-", at most one "."-based
        extension) joined with "_"; traversal parts such as ".." are dropped.
    """
    safe_component = re.compile(r"^[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)?$")
    components = re.split(r"[/\\]", path)
    return "_".join(part for part in components if safe_component.match(part))
def generate_compare_filepath(scan_id):
    """Build the report-comparison output path for *scan_id*, stamped with
    the current date and time."""
    timestamp = now(format="%Y_%m_%d_%H_%M_%S")
    return f"/report_compare_{timestamp}_{scan_id}.json"