From 0e409d4a52bd3d37d0aa0ad4e2d7f3b9a8adcdb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beat=20K=C3=BCng?= Date: Tue, 17 Jun 2025 09:48:34 +0200 Subject: [PATCH] Improve multi-monitor handling under wayland (#1276) * fix: improve multi-monitor handling under wayland When a monitor gets disconnected, the destroy event of all associated windows gets called, and the window gets removed. This patch changes that behavior: the window is still closed but the configuration is kept using the existing reload mechanism. In addition, a callback is added to listen for new monitors, triggering a reload when a new monitor gets connected. This logic also reloads already running windows, which has a positive and negative effect: - positive: if currently running e.g. on the second monitor specified in the list, the window can get moved to the first monitor - negative: if reloading starts it on the same monitor, it gets reset (e.g. graphs) I also had to work around an issue: the monitor model is not yet available immediately when a new monitor callback triggers. Waiting in the callback does not help (I tried 10 seconds). However, waiting outside, it always became available after 10ms. Tested with a dual monitor setup under KDE through a combination of: - enabling/disabling individual monitors - switching between monitors - specifying a specific monitor in the yuck config - specifying a list of specific monitors in the yuck config In all these cases the behavior is as expected, and the widget gets loaded on the first available monitor (or stays unloaded until one becomes available). It also works when opening a window without any of the configured monitors being available. There is one remaining error from GTK when closing the window: GLib-GObject-CRITICAL **: 20:06:05.912: ../gobject/gsignal.c:2684: instance '0x55a4ab4be2d0' has no handler with id '136' This comes from the `self.gtk_window.disconnect(handler_id)` call. To prevent that we'd have to reset `destroy_event_handler_id`. 
* fix: do not call gtk::main_iteration_do while waiting for monitor model Executors that poll a future cannot be called recursively (in this case glib::main_context_futures::TaskSource::poll). So we cannot call gtk::main_iteration_do here, which in some cases led to the future being polled again, which raised a panic in the form of: thread 'main' panicked at glib/src/main_context_futures.rs:238:56: called `Result::unwrap()` on an `Err` value: EnterError We can just remove it as tokio::time::sleep() ensures the main thread continues to process (gtk) events during that time. --- CHANGELOG.md | 1 + crates/eww/src/app.rs | 72 +++++++++++++++++++++++++++++++--------- crates/eww/src/opts.rs | 2 +- crates/eww/src/server.rs | 39 ++++++++++++++++------ 4 files changed, 87 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3911aa3..9eb4084 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ All notable changes to eww will be listed here, starting at changes since versio - Fix wayland monitor names support (By: dragonnn) - Load systray items that are registered without a path (By: Kage-Yami) - `get_locale` now follows POSIX standard for locale selection (By: mirhahn, w-lfchen) +- Improve multi-monitor handling under wayland (By: bkueng) ### Features - Add warning and docs for incompatible `:anchor` and `:exclusive` options diff --git a/crates/eww/src/app.rs b/crates/eww/src/app.rs index 38a3f84..c89e6f6 100644 --- a/crates/eww/src/app.rs +++ b/crates/eww/src/app.rs @@ -68,6 +68,7 @@ pub enum DaemonCommand { }, CloseWindows { windows: Vec, + auto_reopen: bool, sender: DaemonResponseSender, }, KillServer, @@ -147,16 +148,34 @@ impl std::fmt::Debug for App { } } +/// Wait until the .model() is available for all monitors (or there is a timeout) +async fn wait_for_monitor_model() { + let display = gdk::Display::default().expect("could not get default display"); + let start = std::time::Instant::now(); + loop { + let all_monitors_set = + 
(0..display.n_monitors()).all(|i| display.monitor(i).and_then(|monitor| monitor.model()).is_some()); + if all_monitors_set { + break; + } + tokio::time::sleep(Duration::from_millis(10)).await; + if std::time::Instant::now() - start > Duration::from_millis(500) { + log::warn!("Timed out waiting for monitor model to be set"); + break; + } + } +} + impl App { /// Handle a [`DaemonCommand`] event, logging any errors that occur. - pub fn handle_command(&mut self, event: DaemonCommand) { - if let Err(err) = self.try_handle_command(event) { + pub async fn handle_command(&mut self, event: DaemonCommand) { + if let Err(err) = self.try_handle_command(event).await { error_handling_ctx::print_error(err); } } /// Try to handle a [`DaemonCommand`] event. - fn try_handle_command(&mut self, event: DaemonCommand) -> Result<()> { + async fn try_handle_command(&mut self, event: DaemonCommand) -> Result<()> { log::debug!("Handling event: {:?}", &event); match event { DaemonCommand::NoOp => {} @@ -174,6 +193,10 @@ impl App { } } DaemonCommand::ReloadConfigAndCss(sender) => { + // Wait for all monitor models to be set. When a new monitor gets added, this + // might not immediately be the case. And if we were to wait inside the + // connect_monitor_added callback, model() never gets set. So instead we wait here. 
+ wait_for_monitor_model().await; let mut errors = Vec::new(); let config_result = config::read_from_eww_paths(&self.paths); @@ -200,7 +223,7 @@ impl App { DaemonCommand::CloseAll => { log::info!("Received close command, closing all windows"); for window_name in self.open_windows.keys().cloned().collect::>() { - self.close_window(&window_name)?; + self.close_window(&window_name, false)?; } } DaemonCommand::OpenMany { windows, args, should_toggle, sender } => { @@ -209,7 +232,7 @@ impl App { .map(|w| { let (config_name, id) = w; if should_toggle && self.open_windows.contains_key(id) { - self.close_window(id) + self.close_window(id, false) } else { log::debug!("Config: {}, id: {}", config_name, id); let window_args = args @@ -240,7 +263,7 @@ impl App { let is_open = self.open_windows.contains_key(&instance_id); let result = if should_toggle && is_open { - self.close_window(&instance_id) + self.close_window(&instance_id, false) } else { self.open_window(&WindowArguments { instance_id, @@ -256,9 +279,10 @@ impl App { sender.respond_with_result(result)?; } - DaemonCommand::CloseWindows { windows, sender } => { - let errors = windows.iter().map(|window| self.close_window(window)).filter_map(Result::err); - sender.respond_with_error_list(errors)?; + DaemonCommand::CloseWindows { windows, auto_reopen, sender } => { + let errors = windows.iter().map(|window| self.close_window(window, auto_reopen)).filter_map(Result::err); + // Ignore sending errors, as the channel might already be closed + let _ = sender.respond_with_error_list(errors); } DaemonCommand::PrintState { all, sender } => { let scope_graph = self.scope_graph.borrow(); @@ -360,7 +384,7 @@ impl App { } /// Close a window and do all the required cleanups in the scope_graph and script_var_handler - fn close_window(&mut self, instance_id: &str) -> Result<()> { + fn close_window(&mut self, instance_id: &str, auto_reopen: bool) -> Result<()> { if let Some(old_abort_send) = 
self.window_close_timer_abort_senders.remove(instance_id) { _ = old_abort_send.send(()); } @@ -380,7 +404,17 @@ impl App { self.script_var_handler.stop_for_variable(unused_var.clone()); } - self.instance_id_to_args.remove(instance_id); + if auto_reopen { + self.failed_windows.insert(instance_id.to_string()); + // There might be an alternative monitor available already, so try to re-open it immediately. + // This can happen for example when a monitor gets disconnected and another connected, + // and the connection event happens before the disconnect. + if let Some(window_arguments) = self.instance_id_to_args.get(instance_id) { + let _ = self.open_window(&window_arguments.clone()); + } + } else { + self.instance_id_to_args.remove(instance_id); + } Ok(()) } @@ -392,7 +426,7 @@ impl App { // if an instance of this is already running, close it if self.open_windows.contains_key(instance_id) { - self.close_window(instance_id)?; + self.close_window(instance_id, false)?; } self.instance_id_to_args.insert(instance_id.to_string(), window_args.clone()); @@ -445,9 +479,17 @@ impl App { move |_| { // we don't care about the actual error response from the daemon as this is mostly just a fallback. // Generally, this should get disconnected before the gtk window gets destroyed. - // It serves as a fallback for when the window is closed manually. + // This callback is triggered in 2 cases: + // - When the monitor of this window gets disconnected + // - When the window is closed manually. 
+ // We don't distinguish here and assume the window should be reopened once a monitor + // becomes available again let (response_sender, _) = daemon_response::create_pair(); - let command = DaemonCommand::CloseWindows { windows: vec![instance_id.clone()], sender: response_sender }; + let command = DaemonCommand::CloseWindows { + windows: vec![instance_id.clone()], + auto_reopen: true, + sender: response_sender, + }; if let Err(err) = app_evt_sender.send(command) { log::error!("Error sending close window command to daemon after gtk window destroy event: {}", err); } @@ -466,7 +508,7 @@ impl App { tokio::select! { _ = glib::timeout_future(duration) => { let (response_sender, mut response_recv) = daemon_response::create_pair(); - let command = DaemonCommand::CloseWindows { windows: vec![instance_id.clone()], sender: response_sender }; + let command = DaemonCommand::CloseWindows { windows: vec![instance_id.clone()], auto_reopen: false, sender: response_sender }; if let Err(err) = app_evt_sender.send(command) { log::error!("Error sending close window command to daemon after gtk window destroy event: {}", err); } diff --git a/crates/eww/src/opts.rs b/crates/eww/src/opts.rs index fb010c6..eba9a04 100644 --- a/crates/eww/src/opts.rs +++ b/crates/eww/src/opts.rs @@ -292,7 +292,7 @@ impl ActionWithServer { }) } ActionWithServer::CloseWindows { windows } => { - return with_response_channel(|sender| app::DaemonCommand::CloseWindows { windows, sender }); + return with_response_channel(|sender| app::DaemonCommand::CloseWindows { windows, auto_reopen: false, sender }); } ActionWithServer::Reload => return with_response_channel(app::DaemonCommand::ReloadConfigAndCss), ActionWithServer::ListWindows => return with_response_channel(app::DaemonCommand::ListWindows), diff --git a/crates/eww/src/server.rs b/crates/eww/src/server.rs index 8e22d3f..718fdb7 100644 --- a/crates/eww/src/server.rs +++ b/crates/eww/src/server.rs @@ -105,13 +105,15 @@ pub fn initialize_server( } } + 
connect_monitor_added(ui_send.clone()); + // initialize all the handlers and tasks running asyncronously let tokio_handle = init_async_part(app.paths.clone(), ui_send); gtk::glib::MainContext::default().spawn_local(async move { // if an action was given to the daemon initially, execute it first. if let Some(action) = action { - app.handle_command(action); + app.handle_command(action).await; } loop { @@ -120,7 +122,7 @@ pub fn initialize_server( app.scope_graph.borrow_mut().handle_scope_graph_event(scope_graph_evt); }, Some(ui_event) = ui_recv.recv() => { - app.handle_command(ui_event); + app.handle_command(ui_event).await; } else => break, } @@ -136,6 +138,29 @@ pub fn initialize_server( Ok(ForkResult::Child) } +fn connect_monitor_added(ui_send: UnboundedSender) { + let display = gtk::gdk::Display::default().expect("could not get default display"); + display.connect_monitor_added({ + move |_display: >k::gdk::Display, _monitor: >k::gdk::Monitor| { + log::info!("New monitor connected, reloading configuration"); + let _ = reload_config_and_css(&ui_send); + } + }); +} + +fn reload_config_and_css(ui_send: &UnboundedSender) -> Result<()> { + let (daemon_resp_sender, mut daemon_resp_response) = daemon_response::create_pair(); + ui_send.send(DaemonCommand::ReloadConfigAndCss(daemon_resp_sender))?; + tokio::spawn(async move { + match daemon_resp_response.recv().await { + Some(daemon_response::DaemonResponse::Success(_)) => log::info!("Reloaded config successfully"), + Some(daemon_response::DaemonResponse::Failure(e)) => eprintln!("{}", e), + None => log::error!("No response to reload configuration-reload request"), + } + }); + Ok(()) +} + fn init_async_part(paths: EwwPaths, ui_send: UnboundedSender) -> tokio::runtime::Handle { let rt = tokio::runtime::Builder::new_multi_thread() .thread_name("main-async-runtime") @@ -216,20 +241,12 @@ async fn run_filewatch>(config_dir: P, evt_send: UnboundedSender< debounce_done.store(true, Ordering::SeqCst); }); - let (daemon_resp_sender, 
mut daemon_resp_response) = daemon_response::create_pair(); // without this sleep, reading the config file sometimes gives an empty file. // This is probably a result of editors not locking the file correctly, // and eww being too fast, thus reading the file while it's empty. // There should be some cleaner solution for this, but this will do for now. tokio::time::sleep(std::time::Duration::from_millis(50)).await; - evt_send.send(app::DaemonCommand::ReloadConfigAndCss(daemon_resp_sender))?; - tokio::spawn(async move { - match daemon_resp_response.recv().await { - Some(daemon_response::DaemonResponse::Success(_)) => log::info!("Reloaded config successfully"), - Some(daemon_response::DaemonResponse::Failure(e)) => eprintln!("{}", e), - None => log::error!("No response to reload configuration-reload request"), - } - }); + reload_config_and_css(&evt_send)?; } }, else => break