Skip to content

Commit

Permalink
WIP - poll with python
Browse files Browse the repository at this point in the history
This reduces the roundtrips and allows us to maintain "temporary"
mount points for btrfs filesystems that are not otherwise mounted.

The process will be shut down when the cockpit session is closed by
cockpit-ws, so we get reliable cleanup.
  • Loading branch information
mvollmer committed Aug 16, 2024
1 parent 01357bd commit 03b3bbf
Show file tree
Hide file tree
Showing 4 changed files with 271 additions and 115 deletions.
193 changes: 193 additions & 0 deletions pkg/storaged/btrfs/btrfs-tool.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,193 @@
#! /usr/bin/python3

# btrfs-tool -- Query and monitor btrfs filesystems
#
# This program monitors all btrfs filesystems and reports their
# subvolumes and other things.
#
# It can do that continuously, or as a one-shot operation. The tool
# mounts btrfs filesystems as necessary to retrieve the requested
# information, but does it in a polite way: they are mounted once and
# then left mounted until that is no longer needed. Typically, you
# might see some mounts when a Cockpit session starts, and the
# corresponding unmounts when it ends.
#
# This tool can be run multiple times concurrently, and it won't get
# confused.

import contextlib
import subprocess
import json
import re
import sys
import time
import os
import fcntl
import select

Check notice

Code scanning / CodeQL

Unused import Note

Import of 'select' is not used.
import signal

# Directory below which this tool creates its "temporary" btrfs mount
# points, one subdirectory per filesystem UUID.
TMP_MP_DIR = "/var/lib/cockpit/btrfs"

@contextlib.contextmanager
def atomic_file(path):
    """Yield the JSON object stored at PATH, writing back changes on exit.

    The file is held under an exclusive flock for the duration of the
    "with" body, so concurrent invocations of this tool serialize their
    updates.  Mutations of the yielded dict are written back on normal
    exit; if the body raises, the file is left unchanged.
    """
    # Make sure the directory containing PATH exists before opening.
    # (The original hardcoded TMP_MP_DIR here, which only worked for
    # files directly below it.)
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    fd = os.open(path, os.O_RDWR | os.O_CREAT)
    fcntl.flock(fd, fcntl.LOCK_EX)
    # Read the whole file.  The previous fixed-size os.read(fd, 100000)
    # would silently truncate a larger database and corrupt it.
    chunks = []
    while True:
        chunk = os.read(fd, 65536)
        if not chunk:
            break
        chunks.append(chunk)
    data = b"".join(chunks)
    blob = json.loads(data) if data else {}
    try:
        yield blob
        out = json.dumps(blob).encode() + b"\n"
        os.lseek(fd, 0, os.SEEK_SET)
        os.truncate(fd, 0)
        os.write(fd, out)
    finally:
        # Closing the fd also releases the flock.
        os.close(fd)

def list_filesystems():
    """Scan all block devices and return the btrfs filesystems found.

    The result maps filesystem UUID to a record with keys 'uuid',
    'devices' (all member block devices) and 'mountpoints' (all mount
    points across those devices, excluding our temporary ones below
    TMP_MP_DIR).
    """
    lsblk = subprocess.check_output(["lsblk", "-Jplno", "NAME,FSTYPE,UUID,MOUNTPOINTS"])
    filesystems = {}
    for dev in json.loads(lsblk)['blockdevices']:
        if dev['fstype'] != "btrfs":
            continue
        uuid = dev['uuid']
        mps = [mp for mp in dev['mountpoints']
               if mp is not None and not mp.startswith(TMP_MP_DIR)]
        fs = filesystems.setdefault(uuid, {'uuid': uuid, 'devices': [], 'mountpoints': []})
        fs['devices'].append(dev['name'])
        fs['mountpoints'].extend(mps)
    return filesystems

# UUIDs of the filesystems that this process has temporarily mounted
# below TMP_MP_DIR, so they can be cleaned up on exit.
tmp_mountpoints = set()

def add_tmp_mountpoint(uuid, dev):
    """Mount DEV below TMP_MP_DIR and register it in the shared database.

    The database keeps a reference count per filesystem UUID so that
    several concurrent instances of this tool can share one temporary
    mount; the actual mount happens only for the first reference.
    """
    global tmp_mountpoints
    if uuid not in tmp_mountpoints:
        sys.stderr.write(f"ADDING {uuid}\n")
        tmp_mountpoints.add(uuid)
        with atomic_file(TMP_MP_DIR + "/db") as db:
            if uuid in db and db[uuid] > 0:
                db[uuid] += 1
            else:
                db[uuid] = 1
                # XXX - mount as read-only? That interferes with other mounts.
                mp = TMP_MP_DIR + "/" + uuid
                sys.stderr.write(f"MOUNTING {mp}\n")
                # The original passed mode=0x400, a hex/octal mix-up: it
                # equals 0o2000 (only the setgid bit, no access bits).
                # 0o700 gives the owner (root) full access.
                os.makedirs(mp, mode=0o700, exist_ok=True)
                subprocess.check_call(["mount", dev, mp])

def remove_tmp_mountpoint(uuid):
    """Drop one reference to the temporary mount of filesystem UUID.

    When the last reference goes away, the filesystem is unmounted and
    its mount point directory is removed.  Unmount failures are logged
    but do not raise, so cleanup of the remaining state still happens.
    """
    global tmp_mountpoints
    if uuid in tmp_mountpoints:
        sys.stderr.write(f"REMOVING {uuid}\n")
        tmp_mountpoints.remove(uuid)
        with atomic_file(TMP_MP_DIR + "/db") as db:
            # Treat a missing entry like a last reference, so a corrupted
            # database still gets cleaned up instead of raising KeyError.
            if db.get(uuid, 1) == 1:
                mp = TMP_MP_DIR + "/" + uuid
                try:
                    sys.stderr.write(f"UNMOUNTING {mp}\n")
                    subprocess.check_call(["umount", mp])
                    subprocess.check_call(["rmdir", mp])
                except (OSError, subprocess.CalledProcessError) as err:
                    # Was a bare "except: pass", which also swallowed
                    # SystemExit/KeyboardInterrupt.  Log instead.
                    # XXX - try harder?
                    sys.stderr.write(f"Failed to unmount {mp}: {err}\n")
                db.pop(uuid, None)
            else:
                db[uuid] -= 1

def remove_all_tmp_mountpoints():
    """Release every temporary mount point held by this process."""
    # Iterate over a copy: remove_tmp_mountpoint mutates the set.
    for uuid in list(tmp_mountpoints):
        remove_tmp_mountpoint(uuid)

def ensure_mount_point(fs):
    """Return a directory where filesystem FS is mounted.

    If FS already has a regular mount point, any temporary mount of
    ours is released and that mount point is returned.  Otherwise a
    temporary mount below TMP_MP_DIR is created (or reused).
    """
    mountpoints = fs['mountpoints']
    if not mountpoints:
        add_tmp_mountpoint(fs['uuid'], fs['devices'][0])
        return TMP_MP_DIR + "/" + fs['uuid']
    remove_tmp_mountpoint(fs['uuid'])
    return mountpoints[0]

def get_subvolume_info(fs):
    """Return a list of subvolume records for filesystem FS, or None on error.

    Each record has 'pathname', 'id', 'parent', 'uuid' and
    'parent_uuid' (None when the subvolume is not a snapshot).
    """
    mp = ensure_mount_point(fs)
    try:
        lines = subprocess.check_output(["btrfs", "subvolume", "list", "-apuq", mp]).splitlines()
        subvols = []
        for line in lines:
            match = re.match(b"ID (\\d+).*parent (\\d+).*parent_uuid (.*)uuid (.*) path (<FS_TREE>/)?(.*)", line)
            if match:
                subvols.append({
                    'pathname': match[6].decode(errors='replace'),
                    'id': int(match[1]),
                    'parent': int(match[2]),
                    'uuid': match[4].decode(),
                    # A parent_uuid of "-" means "not a snapshot".
                    'parent_uuid': None if match[3][0] == ord("-") else match[3].decode()
                })
        return subvols
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt.
        # XXX - export error message?
        return None

def get_default_subvolume(fs):
    """Return the numeric ID of the default subvolume of filesystem FS.

    Returns None explicitly (instead of falling off the end) when the
    ID can not be parsed from "btrfs subvolume get-default" output.
    """
    mp = ensure_mount_point(fs)
    output = subprocess.check_output(["btrfs", "subvolume", "get-default", mp])
    match = re.match(b"ID (\\d+).*", output)
    if match:
        return int(match[1])
    return None

def get_usages(fs):
    """Return a dict mapping device path to bytes used for filesystem FS."""
    output = subprocess.check_output(["btrfs", "filesystem", "show", "--raw", fs['uuid']])
    # Compile the pattern once instead of re-matching the string form on
    # every line; also drop the stray C-style semicolons of the original.
    pattern = re.compile(b".*used\\s+(\\d+)\\s+path\\s+([\\w/]+).*")
    usages = {}
    for line in output.splitlines():
        match = pattern.match(line)
        if match:
            usages[match[2].decode()] = int(match[1])
    return usages

def poll():
    """Gather one snapshot of information about all btrfs filesystems.

    Returns a dict mapping UUID to {'subvolumes', 'default_subvolume',
    'usages'}.
    """
    sys.stderr.write("POLL\n")
    info = {}
    for fs in list_filesystems().values():
        info[fs['uuid']] = {
            'subvolumes': get_subvolume_info(fs),
            'default_subvolume': get_default_subvolume(fs),
            'usages': get_usages(fs),
        }
    return info

def cmd_monitor():
    """Write filesystem info to stdout now and whenever it changes, forever."""
    last = poll()
    sys.stdout.write(json.dumps(last) + "\n")
    sys.stdout.flush()
    while True:
        time.sleep(5.0)
        current = poll()
        if current == last:
            continue
        sys.stdout.write(json.dumps(current) + "\n")
        sys.stdout.flush()
        last = current

def cmd_poll():
    """Write one snapshot of filesystem info to stdout."""
    snapshot = json.dumps(poll()) + "\n"
    sys.stdout.write(snapshot)
    sys.stdout.flush()

def cmd(args):
    """Dispatch to the subcommand named in ARGS[1]; unknown or missing
    subcommands are silently ignored."""
    if len(args) <= 1:
        return
    subcommand = args[1]
    if subcommand == "poll":
        cmd_poll()
    elif subcommand == "monitor":
        cmd_monitor()

def main(args):
    """Entry point: run the requested command, then clean up our mounts.

    cockpit-ws sends SIGTERM when the session closes; converting it to
    SystemExit lets the "finally" clause unmount our temporary mounts.
    """
    signal.signal(signal.SIGTERM, lambda _signo, _stack: sys.exit(0))
    try:
        cmd(args)
    except RuntimeError as err:
        sys.stderr.write(str(err) + "\n")
    finally:
        remove_all_tmp_mountpoints()


# Guard the invocation so importing this module (e.g. from tests) does
# not immediately start polling; the original called main() unconditionally.
if __name__ == "__main__":
    main(sys.argv)
172 changes: 62 additions & 110 deletions pkg/storaged/client.js
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@ import { export_mount_point_mapping } from "./anaconda.jsx";

import { dequal } from 'dequal/lite';

import btrfs_tool_py from "./btrfs/btrfs-tool.py";

/* STORAGED CLIENT
*/

Expand Down Expand Up @@ -200,114 +202,6 @@ client.swap_sizes = instance_sampler([{ name: "swapdev.length" },
{ name: "swapdev.free" },
], "direct");

export async function btrfs_poll() {
const usage_regex = /used\s+(?<used>\d+)\s+path\s+(?<device>[\w/]+)/;
if (!client.uuids_btrfs_subvols)
client.uuids_btrfs_subvols = { };
if (!client.uuids_btrfs_usage)
client.uuids_btrfs_usage = { };
if (!client.uuids_btrfs_default_subvol)
client.uuids_btrfs_default_subvol = { };
if (!client.uuids_btrfs_volume)
return;

if (!client.superuser.allowed || !client.features.btrfs) {
return;
}

const uuids_subvols = { };
const uuids_usage = { };
const btrfs_default_subvol = { };
for (const uuid of Object.keys(client.uuids_btrfs_volume)) {
const blocks = client.uuids_btrfs_blocks[uuid];
if (!blocks)
continue;

// In multi device setups MountPoints can be on either of the block devices, so try them all.
const MountPoints = blocks.map(block => {
return client.blocks_fsys[block.path];
}).map(block_fsys => block_fsys.MountPoints).reduce((accum, current) => accum.concat(current));
const mp = MountPoints[0];
if (mp) {
const mount_point = utils.decode_filename(mp);
try {
// HACK: UDisks GetSubvolumes method uses `subvolume list -p` which
// does not show the full subvolume path which we want to show in the UI
//
// $ btrfs subvolume list -p /run/butter
// ID 256 gen 7 parent 5 top level 5 path one
// ID 257 gen 7 parent 256 top level 256 path two
// ID 258 gen 7 parent 257 top level 257 path two/three/four
//
// $ btrfs subvolume list -ap /run/butter
// ID 256 gen 7 parent 5 top level 5 path <FS_TREE>/one
// ID 257 gen 7 parent 256 top level 256 path one/two
// ID 258 gen 7 parent 257 top level 257 path <FS_TREE>/one/two/three/four
const output = await cockpit.spawn(["btrfs", "subvolume", "list", "-apuq", mount_point], { superuser: "require", err: "message" });
const subvols = [{ pathname: "/", id: 5, parent: null }];
for (const line of output.split("\n")) {
const m = line.match(/ID (\d+).*parent (\d+).*parent_uuid (.*)uuid (.*) path (<FS_TREE>\/)?(.*)/);
if (m) {
// The parent uuid is the uuid of which this subvolume is a snapshot.
// https://github.com/torvalds/linux/blob/8d025e2092e29bfd13e56c78e22af25fac83c8ec/include/uapi/linux/btrfs.h#L885
let parent_uuid = m[3].trim();
// BTRFS_UUID_SIZE is 16
parent_uuid = parent_uuid.length < 16 ? null : parent_uuid;
subvols.push({ pathname: m[6], id: Number(m[1]), parent: Number(m[2]), uuid: m[4], parent_uuid });
}
}
uuids_subvols[uuid] = subvols;
} catch (err) {
console.warn(`unable to obtain subvolumes for mount point ${mount_point}`, err);
}

// HACK: Obtain the default subvolume, required for mounts in which do not specify a subvol and subvolid.
// In the future can be obtained via UDisks, it requires the btrfs partition to be mounted somewhere.
// https://github.com/storaged-project/udisks/commit/b6966b7076cd837f9d307eef64beedf01bc863ae
try {
const output = await cockpit.spawn(["btrfs", "subvolume", "get-default", mount_point], { superuser: "require", err: "message" });
const id_match = output.match(/ID (\d+).*/);
if (id_match)
btrfs_default_subvol[uuid] = Number(id_match[1]);
} catch (err) {
console.warn(`unable to obtain default subvolume for mount point ${mount_point}`, err);
}

// HACK: UDisks should expose a better btrfs API with btrfs device information
// https://github.com/storaged-project/udisks/issues/1232
// TODO: optimise into just parsing one `btrfs filesystem show`?
try {
const usage_output = await cockpit.spawn(["btrfs", "filesystem", "show", "--raw", uuid], { superuser: "require", err: "message" });
const usages = {};
for (const line of usage_output.split("\n")) {
const match = usage_regex.exec(line);
if (match) {
const { used, device } = match.groups;
usages[device] = used;
}
}
uuids_usage[uuid] = usages;
} catch (err) {
console.warn(`btrfs filesystem show ${uuid}`, err);
}
} else {
uuids_subvols[uuid] = null;
uuids_usage[uuid] = null;
}
}

if (!dequal(client.uuids_btrfs_subvols, uuids_subvols) || !dequal(client.uuids_btrfs_usage, uuids_usage) ||
!dequal(client.uuids_btrfs_default_subvol, btrfs_default_subvol)) {
debug("btrfs_pol new subvols:", uuids_subvols);
client.uuids_btrfs_subvols = uuids_subvols;
client.uuids_btrfs_usage = uuids_usage;
debug("btrfs_pol usage:", uuids_usage);
client.uuids_btrfs_default_subvol = btrfs_default_subvol;
debug("btrfs_pol default subvolumes:", btrfs_default_subvol);
client.update();
}
}

function btrfs_findmnt_poll() {
if (!client.btrfs_mounts)
client.btrfs_mounts = { };
Expand Down Expand Up @@ -391,15 +285,73 @@ function btrfs_findmnt_poll() {
});
}

// Merge a fresh snapshot from btrfs-tool.py into the client state and
// notify listeners when anything actually changed.
function btrfs_update(data) {
    if (!client.uuids_btrfs_subvols)
        client.uuids_btrfs_subvols = { };
    if (!client.uuids_btrfs_usage)
        client.uuids_btrfs_usage = { };
    if (!client.uuids_btrfs_default_subvol)
        client.uuids_btrfs_default_subvol = { };

    const uuids_subvols = { };
    const uuids_usage = { };
    const default_subvol = { };

    for (const uuid in data) {
        // Removed leftover WIP debug output (console.log("DATA", ...)).
        // The root subvolume (id 5) is not reported by "btrfs subvolume
        // list", so prepend it here.
        uuids_subvols[uuid] = [{ pathname: "/", id: 5, parent: null }].concat(data[uuid].subvolumes);
        uuids_usage[uuid] = data[uuid].usages;
        default_subvol[uuid] = data[uuid].default_subvolume;
    }

    if (!dequal(client.uuids_btrfs_subvols, uuids_subvols) || !dequal(client.uuids_btrfs_usage, uuids_usage) ||
        !dequal(client.uuids_btrfs_default_subvol, default_subvol)) {
        debug("btrfs_pol new subvols:", uuids_subvols);
        client.uuids_btrfs_subvols = uuids_subvols;
        client.uuids_btrfs_usage = uuids_usage;
        debug("btrfs_pol usage:", uuids_usage);
        client.uuids_btrfs_default_subvol = default_subvol;
        debug("btrfs_pol default subvolumes:", default_subvol);
        client.update();
    }
}

// One-shot query of all btrfs filesystems via the privileged
// btrfs-tool.py helper; merges the result into the client state.
export async function btrfs_poll() {
    const data = JSON.parse(await python.spawn(btrfs_tool_py, ["poll"], { superuser: "require" }));
    btrfs_update(data);
}

// Start the long-running "btrfs-tool.py monitor" helper.  It emits one
// JSON document per line whenever the filesystem info changes, and it
// exits together with the Cockpit session, which also cleans up its
// temporary mounts.
function btrfs_start_monitor() {
    if (!client.superuser.allowed || !client.features.btrfs) {
        return;
    }

    const channel = python.spawn(btrfs_tool_py, ["monitor"], { superuser: "require" });
    let buf = "";

    channel.stream(output => {
        buf += output;
        const lines = buf.split("\n");
        // The last element is the (possibly empty) unfinished line.
        buf = lines[lines.length - 1];
        if (lines.length >= 2) {
            // Only the most recent complete line matters; earlier
            // complete lines are superseded snapshots.
            const data = JSON.parse(lines[lines.length - 2]);
            btrfs_update(data);
        }
    });

    channel.catch(err => {
        // Report at warning level (was console.log) so monitor failures
        // stand out in the browser console.
        console.warn("BTRFS MONITOR ERROR", err);
    });
}

function btrfs_start_polling() {
debug("starting polling for btrfs subvolumes");
window.setInterval(btrfs_poll, 5000);
client.uuids_btrfs_subvols = { };
client.uuids_btrfs_usage = { };
client.uuids_btrfs_default_subvol = { };
client.btrfs_mounts = { };
btrfs_poll();
btrfs_findmnt_poll();
btrfs_start_monitor();
}

/* Derived indices.
Expand Down
Loading

0 comments on commit 03b3bbf

Please sign in to comment.