mirror of
https://github.com/lucaspalomodevelop/core.git
synced 2026-03-15 00:54:41 +00:00
Firewall / Rules, Livelog : performance improvements
o Since our filterlog does contain labels nowadays (https://github.com/opnsense/core/issues/5014), we can stop parsing pfctl, which can be quite time consuming o Rate limit the "filter rule stats" configd action to prevent excessive pfctl access on filter pages
This commit is contained in:
parent
22166fc896
commit
9db6ca2fad
@ -87,19 +87,8 @@ def fetch_rule_details():
|
||||
if len(rule_md5) == 32 and set(rule_md5).issubset(HEX_DIGITS):
|
||||
rule_map[rule_md5] = ''.join(lbl.split('"')[2:]).strip().strip('# : ')
|
||||
|
||||
# use pfctl to create a list per rule number with the details found
|
||||
sp = subprocess.run(['/sbin/pfctl', '-vvPsr'], capture_output=True, text=True)
|
||||
for line in sp.stdout.strip().split('\n'):
|
||||
if line.startswith('@'):
|
||||
line_id = line.split()[0][1:]
|
||||
if line.find(' label ') > -1:
|
||||
rid = ''.join(line.split(' label ')[-1:]).strip()[1:].split('"')[0]
|
||||
if rid in rule_map:
|
||||
line_id_map[line_id] = {'rid': rid, 'label': rule_map[rid]}
|
||||
else:
|
||||
line_id_map[line_id] = {'rid': None, 'label': rid}
|
||||
|
||||
return {'line_ids': line_id_map, 'rule_map': rule_map}
|
||||
return rule_map
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
@ -172,8 +161,8 @@ if __name__ == '__main__':
|
||||
rule.update(metadata)
|
||||
if rule['rid'] != '0':
|
||||
# rule id in latest record format, don't use rule sequence number in that case
|
||||
if rule['rid'] in running_conf_descr['rule_map']:
|
||||
rule['label'] = running_conf_descr['rule_map'][rule['rid']]
|
||||
if rule['rid'] in running_conf_descr:
|
||||
rule['label'] = running_conf_descr[rule['rid']]
|
||||
# obsolete md5 in log record
|
||||
else:
|
||||
rule['label'] = ''
|
||||
@ -183,14 +172,11 @@ if __name__ == '__main__':
|
||||
elif len(rulep) > 0 and len(rulep[-1]) == 32 and set(rulep[-1]).issubset(HEX_DIGITS):
|
||||
# rule id appended in record format, don't use rule sequence number in that case either
|
||||
rule['rid'] = rulep[-1]
|
||||
if rulep[-1] in running_conf_descr['rule_map']:
|
||||
rule['label'] = running_conf_descr['rule_map'][rulep[-1]]
|
||||
if rulep[-1] in running_conf_descr:
|
||||
rule['label'] = running_conf_descr[rulep[-1]]
|
||||
# obsolete md5 in log record
|
||||
else:
|
||||
rule['label'] = ''
|
||||
elif 'rulenr' in rule and rule['rulenr'] in running_conf_descr['line_ids']:
|
||||
rule['label'] = running_conf_descr['line_ids'][rule['rulenr']]['label']
|
||||
rule['rid'] = running_conf_descr['line_ids'][rule['rulenr']]['rid']
|
||||
|
||||
result.append(rule)
|
||||
|
||||
|
||||
@ -27,46 +27,77 @@
|
||||
|
||||
"""
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import time
|
||||
import ujson
|
||||
import tempfile
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
# rate limit pfctl calls, request every X seconds
|
||||
RATE_LIMIT_S = 60
|
||||
|
||||
if __name__ == '__main__':
|
||||
results = dict()
|
||||
hex_digits = set("0123456789abcdef")
|
||||
sp = subprocess.run(['/sbin/pfctl', '-sr', '-v'], capture_output=True, text=True)
|
||||
stats = dict()
|
||||
prev_line = ''
|
||||
for rline in sp.stdout.split('\n') + []:
|
||||
line = rline.strip()
|
||||
if len(line) == 0 or line[0] != '[':
|
||||
if prev_line.find(' label ') > -1:
|
||||
lbl = prev_line.split(' label ')[-1]
|
||||
if lbl.count('"') >= 2:
|
||||
rule_md5 = lbl.split('"')[1]
|
||||
if len(rule_md5) == 32 and set(rule_md5).issubset(hex_digits):
|
||||
if rule_md5 in results:
|
||||
# aggregate raw pf rules (a single rule in our ruleset could be expanded)
|
||||
for key in stats:
|
||||
if key in results[rule_md5]:
|
||||
if key == 'pf_rules':
|
||||
results[rule_md5][key] += 1
|
||||
else:
|
||||
results[rule_md5][key] += stats[key]
|
||||
else:
|
||||
results[rule_md5][key] = stats[key]
|
||||
else:
|
||||
results[rule_md5] = stats
|
||||
# reset for next rule
|
||||
prev_line = line
|
||||
stats = {'pf_rules': 1}
|
||||
elif line[0] == '[' and line.find('Evaluations') > 0:
|
||||
parts = line.strip('[ ]').replace(':', ' ').split()
|
||||
for i in range(0, len(parts)-1, 2):
|
||||
if parts[i+1].isdigit():
|
||||
stats[parts[i].lower()] = int(parts[i+1])
|
||||
cache_filename = '/tmp/cache_filter_rulestats.json'
|
||||
fstat = os.stat(cache_filename) if os.path.isfile(cache_filename) else None
|
||||
fhandle = open(cache_filename, 'a+')
|
||||
try:
|
||||
fhandle.seek(0)
|
||||
results = ujson.loads(fhandle.read())
|
||||
except ValueError:
|
||||
results = dict()
|
||||
if fstat is None or (time.time() - fstat.st_mtime) > RATE_LIMIT_S or len(results) == 0:
|
||||
if len(results) == 0:
|
||||
# lock blocking, nothing to return yet
|
||||
fcntl.flock(fhandle, fcntl.LOCK_EX)
|
||||
else:
|
||||
try:
|
||||
fcntl.flock(fhandle, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||
except IOError:
|
||||
# already locked, return previous content
|
||||
print (ujson.dumps(results))
|
||||
sys.exit(0)
|
||||
|
||||
# output
|
||||
print (ujson.dumps(results))
|
||||
results = dict()
|
||||
hex_digits = set("0123456789abcdef")
|
||||
sp = subprocess.run(['/sbin/pfctl', '-sr', '-v'], capture_output=True, text=True)
|
||||
stats = dict()
|
||||
prev_line = ''
|
||||
for rline in sp.stdout.split('\n') + []:
|
||||
line = rline.strip()
|
||||
if len(line) == 0 or line[0] != '[':
|
||||
if prev_line.find(' label ') > -1:
|
||||
lbl = prev_line.split(' label ')[-1]
|
||||
if lbl.count('"') >= 2:
|
||||
rule_md5 = lbl.split('"')[1]
|
||||
if len(rule_md5) == 32 and set(rule_md5).issubset(hex_digits):
|
||||
if rule_md5 in results:
|
||||
# aggregate raw pf rules (a single rule in our ruleset could be expanded)
|
||||
for key in stats:
|
||||
if key in results[rule_md5]:
|
||||
if key == 'pf_rules':
|
||||
results[rule_md5][key] += 1
|
||||
else:
|
||||
results[rule_md5][key] += stats[key]
|
||||
else:
|
||||
results[rule_md5][key] = stats[key]
|
||||
else:
|
||||
results[rule_md5] = stats
|
||||
# reset for next rule
|
||||
prev_line = line
|
||||
stats = {'pf_rules': 1}
|
||||
elif line[0] == '[' and line.find('Evaluations') > 0:
|
||||
parts = line.strip('[ ]').replace(':', ' ').split()
|
||||
for i in range(0, len(parts)-1, 2):
|
||||
if parts[i+1].isdigit():
|
||||
stats[parts[i].lower()] = int(parts[i+1])
|
||||
output = ujson.dumps(results)
|
||||
fhandle.seek(0)
|
||||
fhandle.truncate()
|
||||
fhandle.write(output)
|
||||
fhandle.close()
|
||||
print(output)
|
||||
else:
|
||||
# output
|
||||
print (ujson.dumps(results))
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user