Diffstat (limited to 'src/udev/udevd.c')
-rw-r--r--  src/udev/udevd.c  |  538
 1 file changed, 266 insertions(+), 272 deletions(-)
diff --git a/src/udev/udevd.c b/src/udev/udevd.c
index 299fda8a3a..34e88af539 100644
--- a/src/udev/udevd.c
+++ b/src/udev/udevd.c
@@ -41,6 +41,8 @@
#include <sys/inotify.h>
#include "sd-daemon.h"
+#include "sd-event.h"
+#include "event-util.h"
#include "rtnl-util.h"
#include "cgroup-util.h"
#include "process-util.h"
@@ -62,6 +64,7 @@ static usec_t arg_event_timeout_warn_usec = 180 * USEC_PER_SEC / 3;
typedef struct Manager {
struct udev *udev;
+ sd_event *event;
Hashmap *workers;
struct udev_list_node events;
char *cgroup;
@@ -74,17 +77,16 @@ typedef struct Manager {
struct udev_monitor *monitor;
struct udev_ctrl *ctrl;
struct udev_ctrl_connection *ctrl_conn_blocking;
-
- int fd_ep;
- int fd_ctrl;
- int fd_uevent;
- int fd_signal;
int fd_inotify;
- int fd_worker;
int worker_watch[2];
+ sd_event_source *ctrl_event;
+ sd_event_source *uevent_event;
+ sd_event_source *inotify_event;
+
+ usec_t last_usec;
+
bool stop_exec_queue:1;
- bool reload:1;
bool exit:1;
} Manager;
@@ -110,8 +112,8 @@ struct event {
dev_t devnum;
int ifindex;
bool is_block;
- usec_t start_usec;
- bool warned;
+ sd_event_source *timeout_warning;
+ sd_event_source *timeout;
};
static inline struct event *node_to_event(struct udev_list_node *node) {
@@ -151,6 +153,9 @@ static void event_free(struct event *event) {
udev_device_unref(event->dev);
udev_device_unref(event->dev_kernel);
+ sd_event_source_unref(event->timeout_warning);
+ sd_event_source_unref(event->timeout);
+
if (event->worker)
event->worker->event = NULL;
@@ -252,7 +257,12 @@ static int on_event_timeout_warning(sd_event_source *s, uint64_t usec, void *use
}
static void worker_attach_event(struct worker *worker, struct event *event) {
+ sd_event *e;
+ uint64_t usec;
+ int r;
+
assert(worker);
+ assert(worker->manager);
assert(event);
assert(!event->worker);
assert(!worker->event);
@@ -260,9 +270,19 @@ static void worker_attach_event(struct worker *worker, struct event *event) {
worker->state = WORKER_RUNNING;
worker->event = event;
event->state = EVENT_RUNNING;
- event->start_usec = now(CLOCK_MONOTONIC);
- event->warned = false;
event->worker = worker;
+
+ e = worker->manager->event;
+
+ r = sd_event_now(e, clock_boottime_or_monotonic(), &usec);
+ if (r < 0)
+ return;
+
+ (void) sd_event_add_time(e, &event->timeout_warning, clock_boottime_or_monotonic(),
+ usec + arg_event_timeout_warn_usec, USEC_PER_SEC, on_event_timeout_warning, event);
+
+ (void) sd_event_add_time(e, &event->timeout, clock_boottime_or_monotonic(),
+ usec + arg_event_timeout_usec, USEC_PER_SEC, on_event_timeout, event);
}
static void manager_free(Manager *manager) {
@@ -271,7 +291,12 @@ static void manager_free(Manager *manager) {
udev_builtin_exit(manager->udev);
+ sd_event_source_unref(manager->ctrl_event);
+ sd_event_source_unref(manager->uevent_event);
+ sd_event_source_unref(manager->inotify_event);
+
udev_unref(manager->udev);
+ sd_event_unref(manager->event);
manager_workers_free(manager);
event_queue_cleanup(manager, EVENT_UNDEF);
@@ -283,8 +308,6 @@ static void manager_free(Manager *manager) {
udev_rules_unref(manager->rules);
free(manager->cgroup);
- safe_close(manager->fd_ep);
- safe_close(manager->fd_signal);
safe_close(manager->fd_inotify);
safe_close_pair(manager->worker_watch);
@@ -328,17 +351,23 @@ static void worker_spawn(Manager *manager, struct event *event) {
dev = event->dev;
event->dev = NULL;
+ unsetenv("NOTIFY_SOCKET");
+
manager_workers_free(manager);
event_queue_cleanup(manager, EVENT_UNDEF);
manager->monitor = udev_monitor_unref(manager->monitor);
manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
manager->ctrl = udev_ctrl_unref(manager->ctrl);
-
- manager->fd_ep = safe_close(manager->fd_ep);
- manager->fd_signal = safe_close(manager->fd_signal);
+ manager->ctrl_conn_blocking = udev_ctrl_connection_unref(manager->ctrl_conn_blocking);
manager->worker_watch[READ_END] = safe_close(manager->worker_watch[READ_END]);
+ manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
+ manager->uevent_event = sd_event_source_unref(manager->uevent_event);
+ manager->inotify_event = sd_event_source_unref(manager->inotify_event);
+
+ manager->event = sd_event_unref(manager->event);
+
sigfillset(&mask);
fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
if (fd_signal < 0) {
@@ -379,9 +408,6 @@ static void worker_spawn(Manager *manager, struct event *event) {
goto out;
}
- /* needed for SIGCHLD/SIGTERM in spawn() */
- udev_event->fd_signal = fd_signal;
-
if (arg_exec_delay > 0)
udev_event->exec_delay = arg_exec_delay;
@@ -454,11 +480,6 @@ skip:
udev_device_unref(dev);
dev = NULL;
- if (udev_event->sigterm) {
- udev_event_unref(udev_event);
- goto out;
- }
-
udev_event_unref(udev_event);
/* wait for more device messages from main udevd, or term signal */
@@ -689,11 +710,104 @@ static bool is_devpath_busy(Manager *manager, struct event *event) {
return false;
}
+static int on_exit_timeout(sd_event_source *s, uint64_t usec, void *userdata) {
+ Manager *manager = userdata;
+
+ assert(manager);
+
+ log_error_errno(ETIMEDOUT, "giving up waiting for workers to finish");
+
+ sd_event_exit(manager->event, -ETIMEDOUT);
+
+ return 1;
+}
+
+static void manager_exit(Manager *manager) {
+ uint64_t usec;
+ int r;
+
+ assert(manager);
+
+ manager->exit = true;
+
+ sd_notify(false,
+ "STOPPING=1\n"
+ "STATUS=Starting shutdown...");
+
+ /* close sources of new events and discard buffered events */
+ manager->ctrl = udev_ctrl_unref(manager->ctrl);
+ manager->ctrl_event = sd_event_source_unref(manager->ctrl_event);
+
+ manager->fd_inotify = safe_close(manager->fd_inotify);
+ manager->inotify_event = sd_event_source_unref(manager->inotify_event);
+
+ manager->monitor = udev_monitor_unref(manager->monitor);
+ manager->uevent_event = sd_event_source_unref(manager->uevent_event);
+
+ /* discard queued events and kill workers */
+ event_queue_cleanup(manager, EVENT_QUEUED);
+ manager_kill_workers(manager);
+
+ r = sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec);
+ if (r < 0)
+ return;
+
+ r = sd_event_add_time(manager->event, NULL, clock_boottime_or_monotonic(),
+ usec + 30 * USEC_PER_SEC, USEC_PER_SEC, on_exit_timeout, manager);
+ if (r < 0)
+ return;
+}
+
+/* reload requested, HUP signal received, rules changed, builtin changed */
+static void manager_reload(Manager *manager) {
+
+ assert(manager);
+
+ sd_notify(false,
+ "RELOADING=1\n"
+ "STATUS=Flushing configuration...");
+
+ manager_kill_workers(manager);
+ manager->rules = udev_rules_unref(manager->rules);
+ udev_builtin_exit(manager->udev);
+
+ sd_notify(false,
+ "READY=1\n"
+ "STATUS=Processing...");
+}
+
static void event_queue_start(Manager *manager) {
struct udev_list_node *loop;
+ usec_t usec;
+ int r;
assert(manager);
+ if (udev_list_node_is_empty(&manager->events) ||
+ manager->exit || manager->stop_exec_queue)
+ return;
+
+ r = sd_event_now(manager->event, clock_boottime_or_monotonic(), &usec);
+ if (r >= 0) {
+ /* check for changed config, every 3 seconds at most */
+ if (manager->last_usec == 0 ||
+ (usec - manager->last_usec) > 3 * USEC_PER_SEC) {
+ if (udev_rules_check_timestamp(manager->rules) ||
+ udev_builtin_validate(manager->udev))
+ manager_reload(manager);
+
+ manager->last_usec = usec;
+ }
+ }
+
+ udev_builtin_init(manager->udev);
+
+ if (!manager->rules) {
+ manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
+ if (!manager->rules)
+ return;
+ }
+
udev_list_node_foreach(loop, &manager->events) {
struct event *event = node_to_event(loop);
@@ -787,6 +901,9 @@ static int on_worker(sd_event_source *s, int fd, uint32_t revents, void *userdat
event_free(worker->event);
}
+ /* we have free workers, try to schedule events */
+ event_queue_start(manager);
+
return 1;
}
@@ -803,6 +920,9 @@ static int on_uevent(sd_event_source *s, int fd, uint32_t revents, void *userdat
r = event_queue_insert(manager, dev);
if (r < 0)
udev_device_unref(dev);
+ else
+ /* we have fresh events, try to schedule them */
+ event_queue_start(manager);
}
return 1;
@@ -841,11 +961,12 @@ static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userd
if (udev_ctrl_get_start_exec_queue(ctrl_msg) > 0) {
log_debug("udevd message (START_EXEC_QUEUE) received");
manager->stop_exec_queue = false;
+ event_queue_start(manager);
}
if (udev_ctrl_get_reload(ctrl_msg) > 0) {
log_debug("udevd message (RELOAD) received");
- manager->reload = true;
+ manager_reload(manager);
}
str = udev_ctrl_get_set_env(ctrl_msg);
@@ -884,7 +1005,7 @@ static int on_ctrl_msg(sd_event_source *s, int fd, uint32_t revents, void *userd
if (udev_ctrl_get_exit(ctrl_msg) > 0) {
log_debug("udevd message (EXIT) received");
- manager->exit = true;
+ manager_exit(manager);
/* keep reference to block the client until we exit
TODO: deal with several blocking exit requests */
manager->ctrl_conn_blocking = udev_ctrl_connection_ref(ctrl_conn);
@@ -1042,7 +1163,7 @@ static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, voi
assert(manager);
- manager->exit = true;
+ manager_exit(manager);
return 1;
}
@@ -1052,7 +1173,7 @@ static int on_sighup(sd_event_source *s, const struct signalfd_siginfo *si, void
assert(manager);
- manager->reload = true;
+ manager_reload(manager);
return 1;
}
@@ -1107,6 +1228,36 @@ static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, voi
worker_free(worker);
}
+ /* we can start new workers, try to schedule events */
+ event_queue_start(manager);
+
+ return 1;
+}
+
+static int on_post(sd_event_source *s, void *userdata) {
+ Manager *manager = userdata;
+ int r;
+
+ assert(manager);
+
+ if (udev_list_node_is_empty(&manager->events)) {
+ /* no pending events */
+ if (!hashmap_isempty(manager->workers)) {
+ /* there are idle workers */
+ log_debug("cleanup idle workers");
+ manager_kill_workers(manager);
+ } else {
+ /* we are idle */
+ if (manager->exit) {
+ r = sd_event_exit(manager->event, 0);
+ if (r < 0)
+ return r;
+ } else if (manager->cgroup)
+ /* cleanup possible left-over processes in our cgroup */
+ cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
+ }
+ }
+
return 1;
}
@@ -1288,6 +1439,7 @@ static int parse_argv(int argc, char *argv[]) {
static int manager_new(Manager **ret) {
_cleanup_(manager_freep) Manager *manager = NULL;
+ int r, fd_ctrl, fd_uevent;
assert(ret);
@@ -1295,10 +1447,6 @@ static int manager_new(Manager **ret) {
if (!manager)
return log_oom();
- manager->fd_ep = -1;
- manager->fd_ctrl = -1;
- manager->fd_uevent = -1;
- manager->fd_signal = -1;
manager->fd_inotify = -1;
manager->worker_watch[WRITE_END] = -1;
manager->worker_watch[READ_END] = -1;
@@ -1316,31 +1464,14 @@ static int manager_new(Manager **ret) {
udev_list_node_init(&manager->events);
udev_list_init(manager->udev, &manager->properties, true);
- *ret = manager;
- manager = NULL;
-
- return 0;
-}
-
-static int manager_listen(Manager *manager) {
- struct epoll_event ep_ctrl = { .events = EPOLLIN };
- struct epoll_event ep_inotify = { .events = EPOLLIN };
- struct epoll_event ep_signal = { .events = EPOLLIN };
- struct epoll_event ep_netlink = { .events = EPOLLIN };
- struct epoll_event ep_worker = { .events = EPOLLIN };
- sigset_t mask;
- int r, one = 1;
-
- assert(manager);
-
- r = systemd_fds(&manager->fd_ctrl, &manager->fd_uevent);
+ r = systemd_fds(&fd_ctrl, &fd_uevent);
if (r >= 0) {
/* get control and netlink socket from systemd */
- manager->ctrl = udev_ctrl_new_from_fd(manager->udev, manager->fd_ctrl);
+ manager->ctrl = udev_ctrl_new_from_fd(manager->udev, fd_ctrl);
if (!manager->ctrl)
return log_error_errno(EINVAL, "error taking over udev control socket");
- manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", manager->fd_uevent);
+ manager->monitor = udev_monitor_new_from_netlink_fd(manager->udev, "kernel", fd_uevent);
if (!manager->monitor)
return log_error_errno(EINVAL, "error taking over netlink socket");
@@ -1354,13 +1485,13 @@ static int manager_listen(Manager *manager) {
if (!manager->ctrl)
return log_error_errno(EINVAL, "error initializing udev control socket");
- manager->fd_ctrl = udev_ctrl_get_fd(manager->ctrl);
+ fd_ctrl = udev_ctrl_get_fd(manager->ctrl);
manager->monitor = udev_monitor_new_from_netlink(manager->udev, "kernel");
if (!manager->monitor)
return log_error_errno(EINVAL, "error initializing netlink socket");
- manager->fd_uevent = udev_monitor_get_fd(manager->monitor);
+ fd_uevent = udev_monitor_get_fd(manager->monitor);
(void) udev_monitor_set_receive_buffer_size(manager->monitor, 128 * 1024 * 1024);
}
@@ -1373,14 +1504,26 @@ static int manager_listen(Manager *manager) {
if (r < 0)
return log_error_errno(EINVAL, "error binding udev control socket");
+ *ret = manager;
+ manager = NULL;
+
+ return 0;
+}
+
+static int manager_listen(Manager *manager) {
+ sigset_t mask;
+ int r, fd_worker, one = 1;
+
+ assert(manager);
+
/* unnamed socket from workers to the main daemon */
r = socketpair(AF_LOCAL, SOCK_DGRAM|SOCK_CLOEXEC, 0, manager->worker_watch);
if (r < 0)
return log_error_errno(errno, "error creating socketpair: %m");
- manager->fd_worker = manager->worker_watch[READ_END];
+ fd_worker = manager->worker_watch[READ_END];
- r = setsockopt(manager->fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
+ r = setsockopt(fd_worker, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
if (r < 0)
return log_error_errno(errno, "could not enable SO_PASSCRED: %m");
@@ -1393,26 +1536,58 @@ static int manager_listen(Manager *manager) {
/* block and listen to all signals on signalfd */
sigfillset(&mask);
sigprocmask(SIG_SETMASK, &mask, &manager->sigmask_orig);
- manager->fd_signal = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
- if (manager->fd_signal < 0)
- return log_error_errno(errno, "error creating signalfd");
-
- ep_ctrl.data.fd = manager->fd_ctrl;
- ep_inotify.data.fd = manager->fd_inotify;
- ep_signal.data.fd = manager->fd_signal;
- ep_netlink.data.fd = manager->fd_uevent;
- ep_worker.data.fd = manager->fd_worker;
-
- manager->fd_ep = epoll_create1(EPOLL_CLOEXEC);
- if (manager->fd_ep < 0)
- return log_error_errno(errno, "error creating epoll fd: %m");
-
- if (epoll_ctl(manager->fd_ep, EPOLL_CTL_ADD, manager->fd_ctrl, &ep_ctrl) < 0 ||
- epoll_ctl(manager->fd_ep, EPOLL_CTL_ADD, manager->fd_inotify, &ep_inotify) < 0 ||
- epoll_ctl(manager->fd_ep, EPOLL_CTL_ADD, manager->fd_signal, &ep_signal) < 0 ||
- epoll_ctl(manager->fd_ep, EPOLL_CTL_ADD, manager->fd_uevent, &ep_netlink) < 0 ||
- epoll_ctl(manager->fd_ep, EPOLL_CTL_ADD, manager->fd_worker, &ep_worker) < 0)
- return log_error_errno(errno, "fail to add fds to epoll: %m");
+
+ r = sd_event_default(&manager->event);
+ if (r < 0)
+ return log_error_errno(errno, "could not allocate event loop: %m");
+
+ r = sd_event_add_signal(manager->event, NULL, SIGINT, on_sigterm, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating sigint event source: %m");
+
+ r = sd_event_add_signal(manager->event, NULL, SIGTERM, on_sigterm, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating sigterm event source: %m");
+
+ r = sd_event_add_signal(manager->event, NULL, SIGHUP, on_sighup, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating sighup event source: %m");
+
+ r = sd_event_add_signal(manager->event, NULL, SIGCHLD, on_sigchld, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating sigchld event source: %m");
+
+ r = sd_event_set_watchdog(manager->event, true);
+ if (r < 0)
+ return log_error_errno(r, "error creating watchdog event source: %m");
+
+ r = sd_event_add_io(manager->event, &manager->ctrl_event, udev_ctrl_get_fd(manager->ctrl), EPOLLIN, on_ctrl_msg, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating ctrl event source: %m");
+
+ /* This needs to be after the inotify and uevent handling, to make sure
+       * that the ping is sent back after fully processing the pending uevents
+ * (including the synthetic ones we may create due to inotify events).
+ */
+ r = sd_event_source_set_priority(manager->ctrl_event, SD_EVENT_PRIORITY_IDLE);
+ if (r < 0)
+ return log_error_errno(r, "cold not set IDLE event priority for ctrl event source: %m");
+
+ r = sd_event_add_io(manager->event, &manager->inotify_event, manager->fd_inotify, EPOLLIN, on_inotify, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating inotify event source: %m");
+
+ r = sd_event_add_io(manager->event, &manager->uevent_event, udev_monitor_get_fd(manager->monitor), EPOLLIN, on_uevent, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating uevent event source: %m");
+
+ r = sd_event_add_io(manager->event, NULL, fd_worker, EPOLLIN, on_worker, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating worker event source: %m");
+
+ r = sd_event_add_post(manager->event, NULL, on_post, manager);
+ if (r < 0)
+ return log_error_errno(r, "error creating post event source: %m");
return 0;
}
@@ -1453,23 +1628,6 @@ int main(int argc, char *argv[]) {
log_debug("set children_max to %u", arg_children_max);
}
- /* before opening new files, make sure std{in,out,err} fds are in a sane state */
- if (arg_daemonize) {
- int fd;
-
- fd = open("/dev/null", O_RDWR);
- if (fd < 0)
- log_error("cannot open /dev/null");
- else {
- if (write(STDOUT_FILENO, 0, 0) < 0)
- dup2(fd, STDOUT_FILENO);
- if (write(STDERR_FILENO, 0, 0) < 0)
- dup2(fd, STDERR_FILENO);
- if (fd > STDERR_FILENO)
- close(fd);
- }
- }
-
/* set umask before creating any file/directory */
r = chdir("/");
if (r < 0) {
@@ -1497,8 +1655,6 @@ int main(int argc, char *argv[]) {
if (r < 0)
goto exit;
- log_info("starting version " VERSION);
-
r = udev_rules_apply_static_dev_perms(manager->rules);
if (r < 0)
log_error_errno(r, "failed to apply permissions on static device nodes: %m");
@@ -1506,6 +1662,8 @@ int main(int argc, char *argv[]) {
if (arg_daemonize) {
pid_t pid;
+ log_info("starting version " VERSION);
+
pid = fork();
switch (pid) {
case 0:
@@ -1523,191 +1681,27 @@ int main(int argc, char *argv[]) {
write_string_file("/proc/self/oom_score_adj", "-1000");
} else
- sd_notify(1, "READY=1");
+ sd_notify(false,
+ "READY=1\n"
+ "STATUS=Processing...");
r = manager_listen(manager);
if (r < 0)
return log_error_errno(r, "failed to set up fds and listen for events: %m");
- for (;;) {
- static usec_t last_usec;
- struct epoll_event ev[8];
- int fdcount;
- int timeout;
- bool is_worker, is_signal, is_inotify, is_uevent, is_ctrl;
- int i;
-
- if (manager->exit) {
- /* close sources of new events and discard buffered events */
- if (manager->fd_ctrl >= 0) {
- epoll_ctl(manager->fd_ep, EPOLL_CTL_DEL, manager->fd_ctrl, NULL);
- manager->fd_ctrl = safe_close(manager->fd_ctrl);
- }
-
- if (manager->monitor) {
- epoll_ctl(manager->fd_ep, EPOLL_CTL_DEL, manager->fd_uevent, NULL);
- manager->monitor = udev_monitor_unref(manager->monitor);
- }
-
- if (manager->fd_inotify >= 0) {
- epoll_ctl(manager->fd_ep, EPOLL_CTL_DEL, manager->fd_inotify, NULL);
- manager->fd_inotify = safe_close(manager->fd_inotify);
- }
-
- /* discard queued events and kill workers */
- event_queue_cleanup(manager, EVENT_QUEUED);
- manager_kill_workers(manager);
-
- /* exit after all has cleaned up */
- if (udev_list_node_is_empty(&manager->events) && hashmap_isempty(manager->workers))
- break;
-
- /* timeout at exit for workers to finish */
- timeout = 30 * MSEC_PER_SEC;
- } else if (udev_list_node_is_empty(&manager->events) && hashmap_isempty(manager->workers)) {
- /* we are idle */
- timeout = -1;
-
- /* cleanup possible left-over processes in our cgroup */
- if (manager->cgroup)
- cg_kill(SYSTEMD_CGROUP_CONTROLLER, manager->cgroup, SIGKILL, false, true, NULL);
- } else {
- /* kill idle or hanging workers */
- timeout = 3 * MSEC_PER_SEC;
- }
-
- fdcount = epoll_wait(manager->fd_ep, ev, ELEMENTSOF(ev), timeout);
- if (fdcount < 0)
- continue;
-
- if (fdcount == 0) {
- struct worker *worker;
- Iterator j;
-
- /* timeout */
- if (manager->exit) {
- log_error("timeout, giving up waiting for workers to finish");
- break;
- }
-
- /* kill idle workers */
- if (udev_list_node_is_empty(&manager->events)) {
- log_debug("cleanup idle workers");
- manager_kill_workers(manager);
- }
-
- /* check for hanging events */
- HASHMAP_FOREACH(worker, manager->workers, j) {
- struct event *event = worker->event;
- usec_t ts;
-
- if (worker->state != WORKER_RUNNING)
- continue;
-
- assert(event);
-
- ts = now(CLOCK_MONOTONIC);
-
- if ((ts - event->start_usec) > arg_event_timeout_warn_usec) {
- if ((ts - event->start_usec) > arg_event_timeout_usec)
- on_event_timeout(NULL, 0, event);
- else if (!event->warned) {
- on_event_timeout_warning(NULL, 0, event);
- event->warned = true;
- }
- }
- }
-
- }
-
- is_worker = is_signal = is_inotify = is_uevent = is_ctrl = false;
- for (i = 0; i < fdcount; i++) {
- if (ev[i].data.fd == manager->fd_worker && ev[i].events & EPOLLIN)
- is_worker = true;
- else if (ev[i].data.fd == manager->fd_uevent && ev[i].events & EPOLLIN)
- is_uevent = true;
- else if (ev[i].data.fd == manager->fd_signal && ev[i].events & EPOLLIN)
- is_signal = true;
- else if (ev[i].data.fd == manager->fd_inotify && ev[i].events & EPOLLIN)
- is_inotify = true;
- else if (ev[i].data.fd == manager->fd_ctrl && ev[i].events & EPOLLIN)
- is_ctrl = true;
- }
-
- /* check for changed config, every 3 seconds at most */
- if ((now(CLOCK_MONOTONIC) - last_usec) > 3 * USEC_PER_SEC) {
- if (udev_rules_check_timestamp(manager->rules))
- manager->reload = true;
- if (udev_builtin_validate(manager->udev))
- manager->reload = true;
-
- last_usec = now(CLOCK_MONOTONIC);
- }
-
- /* reload requested, HUP signal received, rules changed, builtin changed */
- if (manager->reload) {
- manager_kill_workers(manager);
- manager->rules = udev_rules_unref(manager->rules);
- udev_builtin_exit(manager->udev);
- manager->reload = false;
- }
-
- /* event has finished */
- if (is_worker)
- on_worker(NULL, manager->fd_worker, 0, manager);
-
- /* uevent from kernel */
- if (is_uevent)
- on_uevent(NULL, manager->fd_uevent, 0, manager);
-
- /* start new events */
- if (!udev_list_node_is_empty(&manager->events) && !manager->exit && !manager->stop_exec_queue) {
- udev_builtin_init(manager->udev);
- if (!manager->rules)
- manager->rules = udev_rules_new(manager->udev, arg_resolve_names);
- if (manager->rules)
- event_queue_start(manager);
- }
-
- if (is_signal) {
- struct signalfd_siginfo fdsi;
- ssize_t size;
-
- size = read(manager->fd_signal, &fdsi, sizeof(struct signalfd_siginfo));
- if (size == sizeof(struct signalfd_siginfo)) {
- switch (fdsi.ssi_signo) {
- case SIGINT:
- case SIGTERM:
- on_sigterm(NULL, &fdsi, manager);
- break;
- case SIGHUP:
- on_sighup(NULL, &fdsi, manager);
- break;
- case SIGCHLD:
- on_sigchld(NULL, &fdsi, manager);
- break;
- }
- }
- }
-
- /* we are shutting down, the events below are not handled anymore */
- if (manager->exit)
- continue;
-
- /* device node watch */
- if (is_inotify)
- on_inotify(NULL, manager->fd_inotify, 0, manager);
-
- /*
- * This needs to be after the inotify handling, to make sure,
- * that the ping is send back after the possibly generated
- * "change" events by the inotify device node watch.
- */
- if (is_ctrl)
- on_ctrl_msg(NULL, manager->fd_ctrl, 0, manager);
+ r = sd_event_loop(manager->event);
+ if (r < 0) {
+ log_error_errno(r, "event loop failed: %m");
+ goto exit;
}
+ sd_event_get_exit_code(manager->event, &r);
+
exit:
+ sd_notify(false,
+ "STOPPING=1\n"
+ "STATUS=Shutting down...");
+
if (manager)
udev_ctrl_cleanup(manager->ctrl);
mac_selinux_finish();
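
For context (not part of the commit): the hand-rolled epoll_wait()/signalfd() loop removed above is replaced throughout by libsystemd's sd-event API, the same sd_event_add_io()/sd_event_add_signal()/sd_event_loop() pattern the new manager_listen() sets up. A minimal, self-contained sketch of that pattern follows; it is an illustration only, and the handler names (on_stdin, on_term) and the choice of STDIN_FILENO as the watched fd are hypothetical, not taken from udevd.

/* Minimal sd-event sketch (illustration only, not part of this commit):
 * one event source per fd/signal instead of manual epoll_ctl() bookkeeping,
 * mirroring the manager_listen()/sd_event_loop() structure added above.
 * Build with: gcc sketch.c -lsystemd */
#include <signal.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <systemd/sd-event.h>

static int on_stdin(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        char buf[256];
        ssize_t n = read(fd, buf, sizeof(buf));

        /* EOF or read error: ask the loop to terminate */
        if (n <= 0)
                return sd_event_exit(sd_event_source_get_event(s), 0);

        return 1;
}

static int on_term(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        /* SIGTERM/SIGINT: exit the loop cleanly, like manager_exit() requests */
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

int main(void) {
        sd_event *e = NULL;
        sigset_t mask;
        int r;

        /* signals must be blocked before sd_event_add_signal() takes them over */
        sigemptyset(&mask);
        sigaddset(&mask, SIGTERM);
        sigaddset(&mask, SIGINT);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        r = sd_event_default(&e);
        if (r < 0)
                return 1;

        /* register fd and signal sources; NULL means we do not keep the source handle */
        (void) sd_event_add_io(e, NULL, STDIN_FILENO, EPOLLIN, on_stdin, NULL);
        (void) sd_event_add_signal(e, NULL, SIGTERM, on_term, NULL);
        (void) sd_event_add_signal(e, NULL, SIGINT, on_term, NULL);

        /* dispatches sources until sd_event_exit() is called */
        r = sd_event_loop(e);

        sd_event_unref(e);
        return r < 0;
}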