From 9089b560e6d5a4fff6fb00ac786a3d70d7cb7803 Mon Sep 17 00:00:00 2001
From: Antoine Martin
Date: Thu, 19 Dec 2024 22:21:12 +0700
Subject: [PATCH] prevent division by zero errors

these can happen on MS Windows because the timestamps lack granularity
---
 xpra/client/gui/window_backing_base.py |  2 +-
 xpra/codecs/nvidia/cuda/image.py       |  6 +++++-
 xpra/net/file_transfer.py              |  6 +++++-
 xpra/opengl/check.py                   |  5 +++--
 xpra/server/source/source_stats.py     | 13 +++++++------
 xpra/server/window/perfstats.py        |  4 +++-
 6 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/xpra/client/gui/window_backing_base.py b/xpra/client/gui/window_backing_base.py
index df0de97c46..5c2a5e1826 100644
--- a/xpra/client/gui/window_backing_base.py
+++ b/xpra/client/gui/window_backing_base.py
@@ -225,7 +225,7 @@ def calculate_fps(self) -> int:
         e0 = pe[0]
         now = monotonic()
         elapsed = now - e0
-        if elapsed <= 1 and len(pe) >= 5:
+        if 0 < elapsed <= 1 and len(pe) >= 5:
             return round(len(pe) / elapsed)
         cutoff = now - 1
         count = 0
diff --git a/xpra/codecs/nvidia/cuda/image.py b/xpra/codecs/nvidia/cuda/image.py
index 3c5a00edc9..3d35875f01 100644
--- a/xpra/codecs/nvidia/cuda/image.py
+++ b/xpra/codecs/nvidia/cuda/image.py
@@ -48,9 +48,13 @@ def may_download(self):
         self.wait_for_stream()
         self.pixels = host_buffer.tobytes()
         elapsed = monotonic() - start
+        if elapsed > 0:
+            mbs = self.buffer_size / elapsed / 1024 / 1024
+        else:
+            mbs = 9999
         log("may_download() from %#x to %s, size=%s, elapsed=%ims - %iMB/s",
             int(self.cuda_device_buffer), host_buffer, self.buffer_size,
-            int(1000 * elapsed), self.buffer_size / elapsed / 1024 / 1024)
+            int(1000 * elapsed), mbs)
         self.free_cuda()
         ctx.pop()

diff --git a/xpra/net/file_transfer.py b/xpra/net/file_transfer.py
index 7d4634bddf..ab605857ef 100644
--- a/xpra/net/file_transfer.py
+++ b/xpra/net/file_transfer.py
@@ -1056,8 +1056,12 @@ def _process_ack_file_chunk(self, packet: PacketType) -> None:
         if not chunk_state.data:
             # all sent!
             elapsed = monotonic() - chunk_state.start
+            if elapsed > 0:
+                bs = chunk * chunk_size / elapsed
+            else:
+                bs = 9999 * 1000 * 1000
             filelog("%i chunks of %i bytes sent in %ims (%sB/s)",
-                    chunk, chunk_size, elapsed * 1000, std_unit(chunk * chunk_size / elapsed))
+                    chunk, chunk_size, elapsed * 1000, std_unit(bs))
             self.cancel_sending(chunk_id)
             return
         assert chunk_size > 0
diff --git a/xpra/opengl/check.py b/xpra/opengl/check.py
index 5d2c76c01a..3a4f6399bb 100755
--- a/xpra/opengl/check.py
+++ b/xpra/opengl/check.py
@@ -389,11 +389,12 @@ def recs(rname) -> list[str]:
         else:
             log.info(msg)
     if missing_accelerators:
-        if len(missing_accelerators) == 1 and missing_accelerators[0] == "numpy_formathandler":
+        missing_str = csv(missing_accelerators)
+        if missing_str == "numpy_formathandler":
             log_fn = log.debug
         else:
             log_fn = log.info
-        log_fn("OpenGL accelerate missing: %s", csv(missing_accelerators))
+        log_fn(f"OpenGL accelerate missing: {missing_str}")

     for msg in recs("plugins"):
         log(f"plugins msg={msg}")
diff --git a/xpra/server/source/source_stats.py b/xpra/server/source/source_stats.py
index cbf3b1a62b..5dcfb3d507 100644
--- a/xpra/server/source/source_stats.py
+++ b/xpra/server/source/source_stats.py
@@ -285,10 +285,11 @@ def get_info(self) -> dict[str, Any]:
             einfo["pixels_decoded_per_second"] = pixels_decoded_per_second
         if start_time:
             elapsed = now - start_time
-            pixels_per_second = int(total_pixels / elapsed)
-            einfo.update({
-                "pixels_per_second": pixels_per_second,
-                "regions_per_second": int(len(region_sizes) / elapsed),
-                "average_region_size": int(total_pixels / len(region_sizes)),
-            })
+            if elapsed > 0:
+                einfo.update({
+                    "pixels_per_second": int(total_pixels / elapsed),
+                    "regions_per_second": int(len(region_sizes) / elapsed),
+                })
+            if region_sizes:
+                einfo["average_region_size"] = int(total_pixels / len(region_sizes))
         return info
diff --git a/xpra/server/window/perfstats.py b/xpra/server/window/perfstats.py
index 0ee31a8219..2a27628520 100644
--- a/xpra/server/window/perfstats.py
+++ b/xpra/server/window/perfstats.py
@@ -109,7 +109,9 @@ def update_averages(self) -> None:
         if cdt:
             # the elapsed time recorded is in microseconds:
             decode_speed = tuple(
-                (event_time, size, int(size * 1000 * 1000 / elapsed)) for event_time, size, elapsed in cdt)
+                (event_time, size, int(size * 1000 * 1000 / elapsed))
+                for event_time, size, elapsed in cdt if elapsed > 0
+            )
             r = calculate_size_weighted_average(decode_speed)
             self.avg_decode_speed = int(r[0])
             self.recent_decode_speed = int(r[1])
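
Note for reviewers (not part of the patch): every hunk applies the same guard, dividing only when elapsed > 0 and otherwise either falling back to a sentinel rate (9999 MB/s, 9999 * 1000 * 1000 B/s) or dropping the sample. Below is a minimal standalone sketch of that pattern; the helper name safe_rate and its fallback parameter are illustrative only and do not exist in the xpra code base. It assumes the cause named in the commit message: on MS Windows the monotonic clock can be coarse-grained, so two timestamps taken in quick succession may be identical and elapsed comes out as exactly zero.

from time import monotonic


def safe_rate(nbytes: int, start: float, fallback: float = 9999.0) -> float:
    """Bytes per second since `start`, guarded against a zero elapsed time.

    On a coarse-grained monotonic clock (as can happen on MS Windows), two
    measurements taken in quick succession may return the same value, so
    `elapsed` can legitimately be zero.
    """
    elapsed = monotonic() - start
    if elapsed > 0:
        return nbytes / elapsed
    # the clock did not advance: return the sentinel instead of dividing by zero,
    # mirroring the `mbs = 9999` / `bs = 9999 * 1000 * 1000` fallbacks in the patch
    return fallback


# example usage:
# start = monotonic()
# ... transfer some bytes ...
# rate = safe_rate(1024 * 1024, start)

An alternative to the sentinel, used in the perfstats.py hunk above, is to filter out the zero-elapsed samples entirely before computing the weighted average.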