Reporting: Unbound DNS - move duckdb export functionality to helper and always dump before system shutdown to be able to ship duckdb library upgrades on minor releases in the future.

In an attempt to keep the database clean and hopefully less error prone, the logger will now export/import every ~24 hours. As these operations are rather quick, this should also help to minimize the footprint (duckdb files keep growing after deletes).

closes https://github.com/opnsense/core/issues/7049
This commit is contained in:
Ad Schellevis 2024-01-11 14:54:28 +01:00
parent 340b314f57
commit 13408c8206
5 changed files with 62 additions and 23 deletions

2
plist
View File

@ -145,12 +145,12 @@
/usr/local/etc/rc.syshook.d/start/90-sysctl
/usr/local/etc/rc.syshook.d/start/95-beep
/usr/local/etc/rc.syshook.d/stop/05-beep
/usr/local/etc/rc.syshook.d/stop/20-unbound-duckdb.py
/usr/local/etc/rc.syshook.d/stop/80-freebsd
/usr/local/etc/rc.syshook.d/stop/90-backup
/usr/local/etc/rc.syshook.d/stop/99-config
/usr/local/etc/rc.syshook.d/update/10-refresh
/usr/local/etc/rc.syshook.d/upgrade/20-squid-plugin.php
/usr/local/etc/rc.syshook.d/upgrade/20-unbound-duckdb.py
/usr/local/etc/ssl/opnsense.cnf
/usr/local/etc/strongswan.opnsense.d/README
/usr/local/etc/unbound.opnsense.d/README

View File

@ -26,19 +26,19 @@
"""
import sys
import os
import shutil
import glob
sys.path.insert(0, "/usr/local/opnsense/site-python")
from duckdb_helper import DbConnection
from duckdb_helper import export_database
# export database in case the new storage version doesn't match
with DbConnection('/var/unbound/data/unbound.duckdb', read_only=True) as db:
if db is not None and db.connection is not None:
os.makedirs('/var/cache/unbound.duckdb', mode=0o750, exist_ok=True)
shutil.chown('/var/cache/unbound.duckdb', 'unbound', 'unbound')
db.connection.execute("EXPORT DATABASE '/var/cache/unbound.duckdb';")
for filename in glob.glob('/var/cache/unbound.duckdb/*'):
shutil.chown(filename, 'unbound', 'unbound')
if os.path.isfile('/var/unbound/data/unbound.duckdb'):
if os.path.isfile('/var/run/unbound_logger.pid'):
pid = open('/var/run/unbound_logger.pid').read().strip()
try:
os.kill(int(pid), 9)
except ProcessLookupError:
pass
if export_database('/var/unbound/data/unbound.duckdb', '/var/cache/unbound.duckdb', 'unbound', 'unbound'):
print('Unbound DNS database exported successfully.')
else:
print('Unbound DNS database export not required.')

View File

@ -38,14 +38,16 @@ import signal
import socket
import duckdb
sys.path.insert(0, "/usr/local/opnsense/site-python")
from duckdb_helper import DbConnection, StorageVersionException, restore_database
from duckdb_helper import DbConnection, StorageVersionException, restore_database, export_database
class DNSReader:
def __init__(self, source_pipe, target_db, flush_interval):
def __init__(self, source_pipe, target_db, flush_interval, backup_dir):
self.source_pipe = source_pipe
self.target_db = target_db
self.backup_dir = backup_dir
self.timer = 0
self.cleanup_timer = 0
self.imp_exp_timer = time.time()
self.flush_interval = flush_interval
self.buffer = list()
self.selector = selectors.DefaultSelector()
@ -178,6 +180,23 @@ class DNSReader:
except duckdb.ConstraintException:
db.connection.execute("UPDATE client SET hostname=? WHERE ipaddr=?", [host, client])
# duckdb database files don't like records to be deleted over time, which causes unnecessary growth.
# By performing an export/import on a regular basis (roughly every 24 hours), we keep the file more manageable.
if (now - self.imp_exp_timer) > 86400:
self.imp_exp_timer = now
if export_database(self.target_db, self.backup_dir, 'unbound', 'unbound'):
restore_database(self.backup_dir, self.target_db)
syslog.syslog(
syslog.LOG_NOTICE,
'Database auto restore from %s for cleanup reasons in %.2f seconds' % (
self.backup_dir,
time.time() - now
)
)
else:
syslog.syslog(syslog.LOG_ERROR, "unable to export database to %s" % self.backup_dir)
return True
def run_logger(self):
@ -219,8 +238,8 @@ class DNSReader:
# unbound closed pipe
self.close_logger()
def run(pipe, target_db, flush_interval):
r = DNSReader(pipe, target_db, flush_interval)
def run(pipe, target_db, flush_interval, backup_dir):
r = DNSReader(pipe, target_db, flush_interval, backup_dir)
try:
r.run_logger()
except InterruptedError:
@ -247,7 +266,6 @@ if __name__ == '__main__':
syslog.LOG_NOTICE,
'Database restored from %s due to version mismatch' % inputargs.backup_dir
)
# XXX: remove contents of backup_dir?
else:
syslog.syslog(syslog.LOG_ERR, 'Restore needed, but backup locked, exit...')
sys.exit(-1)
@ -262,4 +280,4 @@ if __name__ == '__main__':
syslog.syslog(syslog.LOG_NOTICE, 'Backgrounding unbound logging backend.')
run(inputargs.pipe, inputargs.targetdb, inputargs.flush_interval)
run(inputargs.pipe, inputargs.targetdb, inputargs.flush_interval, inputargs.backup_dir)

View File

@ -27,7 +27,6 @@
"""
import argparse
import sys
import glob
import os
sys.path.insert(0, "/usr/local/opnsense/site-python")
@ -41,13 +40,14 @@ if __name__ == '__main__':
if os.path.isfile("%s/load.sql" % inputargs.backup_dir):
if os.path.isfile('/var/run/unbound_logger.pid'):
pid = open('/var/run/unbound_logger.pid').read().strip()
os.kill(int(pid), 9)
try:
os.kill(int(pid), 9)
except ProcessLookupError:
pass
if os.path.isfile(inputargs.targetdb):
os.unlink(inputargs.targetdb)
if restore_database(inputargs.backup_dir, inputargs.targetdb):
for filename in glob.glob('%s/*' % inputargs.backup_dir):
os.unlink(filename)
print("restored, backup removed")
print("restored")
else:
print("unable to restore")
else:

View File

@ -29,6 +29,8 @@
import os
import duckdb
import fcntl
import glob
import shutil
class StorageVersionException(Exception):
@ -133,3 +135,22 @@ def restore_database(path, target):
raise FileNotFoundError(lock_fn)
return True
def export_database(source, target, owner_uid='root', owner_gid='wheel'):
    """
    Export a duckdb database into a directory of SQL/data files so it can be
    imported again later (e.g. after a duckdb storage format upgrade).
    :param source: source database filename
    :param target: target export directory, created with mode 0750 when missing
    :param owner_uid: owner (user) applied to the export directory and its files
    :param owner_gid: owner (group) applied to the export directory and its files
    :return: True when the database could be opened and was exported, False otherwise
    """
    with DbConnection(source, read_only=True) as db:
        if db is not None and db.connection is not None:
            os.makedirs(target, mode=0o750, exist_ok=True)
            # honor the requested ownership instead of a hard-coded user/group,
            # consistent with the per-file chown loop below and the defaults
            # declared in the signature
            shutil.chown(target, owner_uid, owner_gid)
            db.connection.execute("EXPORT DATABASE '%s';" % target)
            for filename in glob.glob('%s/*' % target):
                shutil.chown(filename, owner_uid, owner_gid)
            return True
    return False