Diff of the two buildlogs: -- --- b1/build.log 2025-03-11 13:10:38.274485475 +0000 +++ b2/build.log 2025-03-11 14:22:59.428005642 +0000 @@ -1,6 +1,6 @@ I: pbuilder: network access will be disabled during build -I: Current time: Mon Apr 13 07:04:45 -12 2026 -I: pbuilder-time-stamp: 1776107085 +I: Current time: Wed Mar 12 03:10:42 +14 2025 +I: pbuilder-time-stamp: 1741698642 I: Building the build Environment I: extracting base tarball [/var/cache/pbuilder/trixie-reproducible-base.tgz] I: copying local configuration @@ -44,52 +44,84 @@ dpkg-source: info: applying dask-matching-interpreter.patch I: Not using root during the build. I: Installing the build-deps -I: user script /srv/workspace/pbuilder/367956/tmp/hooks/D02_print_environment starting +I: user script /srv/workspace/pbuilder/130149/tmp/hooks/D01_modify_environment starting +debug: Running on codethink04-arm64. +I: Changing host+domainname to test build reproducibility +I: Adding a custom variable just for the fun of it... +I: Changing /bin/sh to bash +'/bin/sh' -> '/bin/bash' +lrwxrwxrwx 1 root root 9 Mar 11 13:10 /bin/sh -> /bin/bash +I: Setting pbuilder2's login shell to /bin/bash +I: Setting pbuilder2's GECOS to second user,second room,second work-phone,second home-phone,second other +I: user script /srv/workspace/pbuilder/130149/tmp/hooks/D01_modify_environment finished +I: user script /srv/workspace/pbuilder/130149/tmp/hooks/D02_print_environment starting I: set - BUILDDIR='/build/reproducible-path' - BUILDUSERGECOS='first user,first room,first work-phone,first home-phone,first other' - BUILDUSERNAME='pbuilder1' - BUILD_ARCH='arm64' - DEBIAN_FRONTEND='noninteractive' + BASH=/bin/sh + BASHOPTS=checkwinsize:cmdhist:complete_fullquote:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath + BASH_ALIASES=() + BASH_ARGC=() + BASH_ARGV=() + BASH_CMDS=() + BASH_LINENO=([0]="12" [1]="0") + BASH_LOADABLES_PATH=/usr/local/lib/bash:/usr/lib/bash:/opt/local/lib/bash:/usr/pkg/lib/bash:/opt/pkg/lib/bash:. 
+ BASH_SOURCE=([0]="/tmp/hooks/D02_print_environment" [1]="/tmp/hooks/D02_print_environment") + BASH_VERSINFO=([0]="5" [1]="2" [2]="37" [3]="1" [4]="release" [5]="aarch64-unknown-linux-gnu") + BASH_VERSION='5.2.37(1)-release' + BUILDDIR=/build/reproducible-path + BUILDUSERGECOS='second user,second room,second work-phone,second home-phone,second other' + BUILDUSERNAME=pbuilder2 + BUILD_ARCH=arm64 + DEBIAN_FRONTEND=noninteractive DEB_BUILD_OPTIONS='buildinfo=+all reproducible=+all parallel=12 ' - DISTRIBUTION='trixie' - HOME='/root' - HOST_ARCH='arm64' + DIRSTACK=() + DISTRIBUTION=trixie + EUID=0 + FUNCNAME=([0]="Echo" [1]="main") + GROUPS=() + HOME=/root + HOSTNAME=i-capture-the-hostname + HOSTTYPE=aarch64 + HOST_ARCH=arm64 IFS=' ' - INVOCATION_ID='9210cd9384c0456b832be55c8e3c9811' - LANG='C' - LANGUAGE='en_US:en' - LC_ALL='C' - MAIL='/var/mail/root' - OPTIND='1' - PATH='/usr/sbin:/usr/bin:/sbin:/bin:/usr/games' - PBCURRENTCOMMANDLINEOPERATION='build' - PBUILDER_OPERATION='build' - PBUILDER_PKGDATADIR='/usr/share/pbuilder' - PBUILDER_PKGLIBDIR='/usr/lib/pbuilder' - PBUILDER_SYSCONFDIR='/etc' - PPID='367956' - PS1='# ' - PS2='> ' + INVOCATION_ID=67e7af7fedfa4d438abaff4a24c01317 + LANG=C + LANGUAGE=nl_BE:nl + LC_ALL=C + MACHTYPE=aarch64-unknown-linux-gnu + MAIL=/var/mail/root + OPTERR=1 + OPTIND=1 + OSTYPE=linux-gnu + PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path + PBCURRENTCOMMANDLINEOPERATION=build + PBUILDER_OPERATION=build + PBUILDER_PKGDATADIR=/usr/share/pbuilder + PBUILDER_PKGLIBDIR=/usr/lib/pbuilder + PBUILDER_SYSCONFDIR=/etc + PIPESTATUS=([0]="0") + POSIXLY_CORRECT=y + PPID=130149 PS4='+ ' - PWD='/' - SHELL='/bin/bash' - SHLVL='2' - SUDO_COMMAND='/usr/bin/timeout -k 18.1h 18h /usr/bin/ionice -c 3 /usr/bin/nice /usr/sbin/pbuilder --build --configfile /srv/reproducible-results/rbuild-debian/r-b-build.z6v1fNaT/pbuilderrc_vGaC --distribution trixie --hookdir /etc/pbuilder/first-build-hooks --debbuildopts -b --basetgz /var/cache/pbuilder/trixie-reproducible-base.tgz --buildresult /srv/reproducible-results/rbuild-debian/r-b-build.z6v1fNaT/b1 --logfile b1/build.log dask.distributed_2024.12.1+ds-1.dsc' - SUDO_GID='109' - SUDO_UID='104' - SUDO_USER='jenkins' - TERM='unknown' - TZ='/usr/share/zoneinfo/Etc/GMT+12' - USER='root' - _='/usr/bin/systemd-run' - http_proxy='http://192.168.101.4:3128' + PWD=/ + SHELL=/bin/bash + SHELLOPTS=braceexpand:errexit:hashall:interactive-comments:posix + SHLVL=3 + SUDO_COMMAND='/usr/bin/timeout -k 24.1h 24h /usr/bin/ionice -c 3 /usr/bin/nice -n 11 /usr/bin/unshare --uts -- /usr/sbin/pbuilder --build --configfile /srv/reproducible-results/rbuild-debian/r-b-build.z6v1fNaT/pbuilderrc_jas6 --distribution trixie --hookdir /etc/pbuilder/rebuild-hooks --debbuildopts -b --basetgz /var/cache/pbuilder/trixie-reproducible-base.tgz --buildresult /srv/reproducible-results/rbuild-debian/r-b-build.z6v1fNaT/b2 --logfile b2/build.log dask.distributed_2024.12.1+ds-1.dsc' + SUDO_GID=109 + SUDO_UID=104 + SUDO_USER=jenkins + TERM=unknown + TZ=/usr/share/zoneinfo/Etc/GMT-14 + UID=0 + USER=root + _='I: set' + http_proxy=http://192.168.101.4:3128 I: uname -a - Linux codethink03-arm64 6.1.0-31-cloud-arm64 #1 SMP Debian 6.1.128-1 (2025-02-07) aarch64 GNU/Linux + Linux i-capture-the-hostname 6.1.0-31-cloud-arm64 #1 SMP Debian 6.1.128-1 (2025-02-07) aarch64 GNU/Linux I: ls -l /bin - lrwxrwxrwx 1 root root 7 Mar 4 2025 /bin -> usr/bin -I: user script /srv/workspace/pbuilder/367956/tmp/hooks/D02_print_environment finished + lrwxrwxrwx 1 root root 7 Mar 4 11:20 /bin -> 
usr/bin +I: user script /srv/workspace/pbuilder/130149/tmp/hooks/D02_print_environment finished -> Attempting to satisfy build-dependencies -> Creating pbuilder-satisfydepends-dummy package Package: pbuilder-satisfydepends-dummy @@ -446,7 +478,7 @@ Get: 233 http://deb.debian.org/debian trixie/main arm64 python3-zict all 3.0.0-2 [29.7 kB] Get: 234 http://deb.debian.org/debian trixie/main arm64 tzdata-legacy all 2025a-2 [178 kB] Get: 235 http://deb.debian.org/debian trixie/main arm64 uglifyjs all 3.17.4-2 [12.1 kB] -Fetched 136 MB in 2s (78.7 MB/s) +Fetched 136 MB in 2s (66.1 MB/s) Preconfiguring packages ... Selecting previously unselected package fonts-lato. (Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 19914 files and directories currently installed.) @@ -1198,8 +1230,8 @@ Setting up tzdata (2025a-2) ... Current default time zone: 'Etc/UTC' -Local time is now: Mon Apr 13 19:06:10 UTC 2026. -Universal Time is now: Mon Apr 13 19:06:10 UTC 2026. +Local time is now: Tue Mar 11 13:12:55 UTC 2025. +Universal Time is now: Tue Mar 11 13:12:55 UTC 2025. Run 'dpkg-reconfigure tzdata' if you wish to change it. Setting up libpgm-5.3-0t64:arm64 (5.3.128~dfsg-2.1+b1) ... @@ -1426,7 +1458,11 @@ Building tag database... -> Finished parsing the build-deps I: Building the package -I: Running cd /build/reproducible-path/dask.distributed-2024.12.1+ds/ && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" HOME="/nonexistent/first-build" dpkg-buildpackage -us -uc -b && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" HOME="/nonexistent/first-build" dpkg-genchanges -S > ../dask.distributed_2024.12.1+ds-1_source.changes +I: user script /srv/workspace/pbuilder/130149/tmp/hooks/A99_set_merged_usr starting +Not re-configuring usrmerge for trixie +I: user script /srv/workspace/pbuilder/130149/tmp/hooks/A99_set_merged_usr finished +hostname: Name or service not known +I: Running cd /build/reproducible-path/dask.distributed-2024.12.1+ds/ && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path" HOME="/nonexistent/second-build" dpkg-buildpackage -us -uc -b && env PATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/i/capture/the/path" HOME="/nonexistent/second-build" dpkg-genchanges -S > ../dask.distributed_2024.12.1+ds-1_source.changes dpkg-buildpackage: info: source package dask.distributed dpkg-buildpackage: info: source version 2024.12.1+ds-1 dpkg-buildpackage: info: source distribution unstable @@ -2803,7 +2839,7 @@ Copying distributed.egg-info to build/bdist.linux-aarch64/wheel/./distributed-2024.12.1.egg-info running install_scripts creating build/bdist.linux-aarch64/wheel/distributed-2024.12.1.dist-info/WHEEL -creating '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.tmp-l2e3sqh9/distributed-2024.12.1-py3-none-any.whl' and adding 'build/bdist.linux-aarch64/wheel' to it +creating '/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/.tmp-w5r6aevg/distributed-2024.12.1-py3-none-any.whl' and adding 
'build/bdist.linux-aarch64/wheel' to it adding 'distributed/__init__.py' adding 'distributed/_async_taskgroup.py' adding 'distributed/_asyncio.py' @@ -3508,6 +3544,9 @@ distributed/diagnostics/tests/test_nanny_plugin.py::test_plugin_with_broken_setup_on_new_nanny_logs PASSED [ 2%] distributed/diagnostics/tests/test_nanny_plugin.py::test_unregister_nanny_plugin_with_broken_teardown_raises PASSED [ 3%] distributed/diagnostics/tests/test_nanny_plugin.py::test_nanny_plugin_with_broken_teardown_logs_on_close PASSED [ 3%] +distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [ 3%] +distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [ 3%] +distributed/diagnostics/tests/test_progress.py::test_many_Progress RERUN [ 3%] distributed/diagnostics/tests/test_progress.py::test_many_Progress PASSED [ 3%] distributed/diagnostics/tests/test_progress.py::test_multiprogress PASSED [ 3%] distributed/diagnostics/tests/test_progress.py::test_multiprogress_cancel PASSED [ 3%] @@ -4796,7 +4835,7 @@ distributed/tests/test_client.py::test_computation_object_code_not_available SKIPPED [ 43%] distributed/tests/test_client.py::test_computation_object_code_dask_persist PASSED [ 43%] distributed/tests/test_client.py::test_computation_object_code_client_submit_simple PASSED [ 43%] -distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp PASSED [ 43%] +distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp FAILED [ 43%] distributed/tests/test_client.py::test_computation_object_code_client_submit_dict_comp PASSED [ 43%] distributed/tests/test_client.py::test_computation_object_code_client_map PASSED [ 43%] distributed/tests/test_client.py::test_computation_object_code_client_compute PASSED [ 43%] @@ -4846,7 +4885,7 @@ distributed/tests/test_client.py::test_resolves_future_in_dict PASSED [ 44%] distributed/tests/test_client.py::test_gather_race_vs_AMM[False] PASSED [ 44%] distributed/tests/test_client.py::test_gather_race_vs_AMM[True] PASSED [ 44%] -distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures PASSED [ 44%] +distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures FAILED [ 44%] distributed/tests/test_client.py::test_scheduler_restart_exception_on_cancelled_futures SKIPPED [ 44%] distributed/tests/test_client.py::test_release_persisted_collection PASSED [ 44%] distributed/tests/test_client.py::test_release_persisted_collection_sync PASSED [ 44%] @@ -6042,7 +6081,7 @@ distributed/tests/test_tls_functional.py::test_worker_client PASSED [ 81%] distributed/tests/test_tls_functional.py::test_worker_client_gather PASSED [ 81%] distributed/tests/test_tls_functional.py::test_worker_client_executor PASSED [ 81%] -distributed/tests/test_tls_functional.py::test_retire_workers FAILED [ 81%] +distributed/tests/test_tls_functional.py::test_retire_workers PASSED [ 81%] distributed/tests/test_tls_functional.py::test_security_dict_input PASSED [ 81%] distributed/tests/test_utils.py::test_All PASSED [ 81%] distributed/tests/test_utils.py::test_sync PASSED [ 81%] @@ -6393,7 +6432,7 @@ distributed/tests/test_worker.py::test_missing_released_zombie_tasks PASSED [ 91%] distributed/tests/test_worker.py::test_missing_released_zombie_tasks_2 PASSED [ 92%] distributed/tests/test_worker.py::test_worker_status_sync PASSED [ 92%] -distributed/tests/test_worker.py::test_log_remove_worker PASSED [ 92%] +distributed/tests/test_worker.py::test_log_remove_worker FAILED [ 92%] 
distributed/tests/test_worker.py::test_task_flight_compute_oserror PASSED [ 92%] distributed/tests/test_worker.py::test_gather_dep_cancelled_rescheduled PASSED [ 92%] distributed/tests/test_worker.py::test_gather_dep_do_not_handle_response_of_not_requested_tasks PASSED [ 92%] @@ -6450,7 +6489,7 @@ distributed/tests/test_worker_memory.py::test_workerstate_fail_to_pickle_execute_1[long-running] PASSED [ 93%] distributed/tests/test_worker_memory.py::test_workerstate_fail_to_pickle_flight PASSED [ 93%] distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_2 PASSED [ 93%] -distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill PASSED [ 93%] +distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill FAILED [ 93%] distributed/tests/test_worker_memory.py::test_spill_target_threshold PASSED [ 93%] distributed/tests/test_worker_memory.py::test_spill_constrained PASSED [ 93%] distributed/tests/test_worker_memory.py::test_spill_spill_threshold PASSED [ 93%] @@ -6469,7 +6508,7 @@ distributed/tests/test_worker_memory.py::test_nanny_terminate SKIPPED [ 94%] distributed/tests/test_worker_memory.py::test_disk_cleanup_on_terminate[False] SKIPPED [ 94%] distributed/tests/test_worker_memory.py::test_disk_cleanup_on_terminate[True] SKIPPED [ 94%] -distributed/tests/test_worker_memory.py::test_pause_while_spilling PASSED [ 94%] +distributed/tests/test_worker_memory.py::test_pause_while_spilling FAILED [ 94%] distributed/tests/test_worker_memory.py::test_release_evloop_while_spilling SKIPPED [ 94%] distributed/tests/test_worker_memory.py::test_deprecated_attributes[Worker-memory_limit-123000000000.0] PASSED [ 94%] distributed/tests/test_worker_memory.py::test_deprecated_attributes[Worker-memory_target_fraction-0.789] PASSED [ 94%] @@ -6482,8 +6521,8 @@ distributed/tests/test_worker_memory.py::test_deprecated_params[memory_target_fraction] PASSED [ 94%] distributed/tests/test_worker_memory.py::test_deprecated_params[memory_spill_fraction] PASSED [ 94%] distributed/tests/test_worker_memory.py::test_deprecated_params[memory_pause_fraction] PASSED [ 94%] -distributed/tests/test_worker_memory.py::test_pause_while_idle PASSED [ 94%] -distributed/tests/test_worker_memory.py::test_pause_while_saturated PASSED [ 94%] +distributed/tests/test_worker_memory.py::test_pause_while_idle FAILED [ 94%] +distributed/tests/test_worker_memory.py::test_pause_while_saturated FAILED [ 94%] distributed/tests/test_worker_memory.py::test_worker_log_memory_limit_too_high PASSED [ 94%] distributed/tests/test_worker_memory.py::test_high_unmanaged_memory_warning PASSED [ 94%] distributed/tests/test_worker_memory.py::test_delete_spilled_keys PASSED [ 94%] @@ -6649,9 +6688,429 @@ distributed/tests/test_worker_state_machine.py::test_remove_worker_unknown PASSED [100%] =================================== FAILURES =================================== +_____________ test_computation_object_code_client_submit_list_comp _____________ + +c = +s = +a = +b = + + @gen_cluster(client=True, config={"distributed.diagnostics.computations.nframes": 2}) + async def test_computation_object_code_client_submit_list_comp(c, s, a, b): + def func(x): + return x + + futs = [c.submit(func, x) for x in range(10)] + + await c.gather(futs) + + test_function_code = inspect.getsource( + test_computation_object_code_client_submit_list_comp.__wrapped__ + ) + computations = list(s.computations) +> assert len(computations) == 1 +E assert 2 == 1 +E + where 2 = len([, ]) + +distributed/tests/test_client.py:7339: AssertionError 
+----------------------------- Captured stderr call ----------------------------- +2025-03-12 03:44:14,165 - distributed.scheduler - INFO - State start +2025-03-12 03:44:14,169 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:42103 +2025-03-12 03:44:14,186 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38295/status +2025-03-12 03:44:14,187 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 03:44:14,214 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:34423 +2025-03-12 03:44:14,215 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:34423 +2025-03-12 03:44:14,216 - distributed.worker - INFO - Worker name: 0 +2025-03-12 03:44:14,217 - distributed.worker - INFO - dashboard at: 127.0.0.1:39107 +2025-03-12 03:44:14,230 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:42103 +2025-03-12 03:44:14,231 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:44:14,232 - distributed.worker - INFO - Threads: 1 +2025-03-12 03:44:14,233 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 03:44:14,242 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-hg1w0kok +2025-03-12 03:44:14,243 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:44:14,255 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:41927 +2025-03-12 03:44:14,256 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:41927 +2025-03-12 03:44:14,257 - distributed.worker - INFO - Worker name: 1 +2025-03-12 03:44:14,266 - distributed.worker - INFO - dashboard at: 127.0.0.1:38885 +2025-03-12 03:44:14,267 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:42103 +2025-03-12 03:44:14,268 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:44:14,269 - distributed.worker - INFO - Threads: 2 +2025-03-12 03:44:14,277 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 03:44:14,279 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-kk4hp9f9 +2025-03-12 03:44:14,280 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:44:14,391 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:34423 name: 0 +2025-03-12 03:44:14,450 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:34423 +2025-03-12 03:44:14,451 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:52536 +2025-03-12 03:44:14,451 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:41927 name: 1 +2025-03-12 03:44:14,506 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:41927 +2025-03-12 03:44:14,507 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:52548 +2025-03-12 03:44:14,508 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 03:44:14,509 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 03:44:14,523 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:42103 +2025-03-12 03:44:14,525 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:44:14,534 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:42103 +2025-03-12 03:44:14,535 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:44:14,537 - distributed.core - INFO - Starting established 
connection to tcp://127.0.0.1:42103 +2025-03-12 03:44:14,537 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:42103 +2025-03-12 03:44:14,605 - distributed.scheduler - INFO - Receive client connection: Client-ebb14f0b-fe7e-11ef-9228-ad3bd1b43ef1 +2025-03-12 03:44:14,652 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:52562 +2025-03-12 03:44:14,898 - distributed.scheduler - INFO - Remove client Client-ebb14f0b-fe7e-11ef-9228-ad3bd1b43ef1 +2025-03-12 03:44:14,900 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:52562; closing. +2025-03-12 03:44:14,900 - distributed.scheduler - INFO - Remove client Client-ebb14f0b-fe7e-11ef-9228-ad3bd1b43ef1 +2025-03-12 03:44:14,910 - distributed.scheduler - INFO - Close client connection: Client-ebb14f0b-fe7e-11ef-9228-ad3bd1b43ef1 +2025-03-12 03:44:14,913 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:34423. Reason: worker-close +2025-03-12 03:44:14,923 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:41927. Reason: worker-close +2025-03-12 03:44:14,925 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 03:44:14,934 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 03:44:14,954 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:52548; closing. +2025-03-12 03:44:14,954 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:52536; closing. +2025-03-12 03:44:14,954 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:41927 name: 1 (stimulus_id='handle-worker-cleanup-1741700654.9547553') +2025-03-12 03:44:14,956 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:34423 name: 0 (stimulus_id='handle-worker-cleanup-1741700654.9564517') +2025-03-12 03:44:14,957 - distributed.scheduler - INFO - Lost all workers +2025-03-12 03:44:14,967 - distributed.core - INFO - Connection to tcp://127.0.0.1:42103 has been closed. +2025-03-12 03:44:14,990 - distributed.core - INFO - Connection to tcp://127.0.0.1:42103 has been closed. +2025-03-12 03:44:15,014 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown +2025-03-12 03:44:15,015 - distributed.scheduler - INFO - Scheduler closing all comms +____________ test_client_disconnect_exception_on_cancelled_futures _____________ +ConnectionRefusedError: [Errno 111] Connection refused + +The above exception was the direct cause of the following exception: + +addr = 'tcp://127.0.0.1:35317', timeout = 5, deserialize = True +handshake_overrides = None +connection_args = {'extra_conn_args': {}, 'require_encryption': False, 'ssl_context': None} +scheme = 'tcp', loc = '127.0.0.1:35317' +backend = +connector = +comm = None, time_left = .time_left at 0xffff45adb7e0> +backoff_base = 0.01 + + async def connect( + addr, timeout=None, deserialize=True, handshake_overrides=None, **connection_args + ): + """ + Connect to the given address (a URI such as ``tcp://127.0.0.1:1234``) + and yield a ``Comm`` object. If the connection attempt fails, it is + retried until the *timeout* is expired. 
+ """ + if timeout is None: + timeout = dask.config.get("distributed.comm.timeouts.connect") + timeout = parse_timedelta(timeout, default="seconds") + + scheme, loc = parse_address(addr) + backend = registry.get_backend(scheme) + connector = backend.get_connector() + comm = None + + start = time() + + def time_left(): + deadline = start + timeout + return max(0, deadline - time()) + + backoff_base = 0.01 + attempt = 0 + logger.debug("Establishing connection to %s", loc) + # Prefer multiple small attempts than one long attempt. This should protect + # primarily from DNS race conditions + # gh3104, gh4176, gh4167 + intermediate_cap = timeout / 5 + active_exception = None + while time_left() > 0: + try: +> comm = await wait_for( + connector.connect(loc, deserialize=deserialize, **connection_args), + timeout=min(intermediate_cap, time_left()), + ) + +distributed/comm/core.py:342: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +distributed/utils.py:1914: in wait_for + return await fut +distributed/comm/tcp.py:559: in connect + convert_stream_closed_error(self, e) +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +obj = +exc = ConnectionRefusedError(111, 'Connection refused') + + def convert_stream_closed_error(obj, exc): + """ + Re-raise StreamClosedError as CommClosedError. + """ + if exc.real_error is not None: + # The stream was closed because of an underlying OS error + exc = exc.real_error + if isinstance(exc, ssl.SSLError): + if exc.reason and "UNKNOWN_CA" in exc.reason: + raise FatalCommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") +> raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc +E distributed.comm.core.CommClosedError: in : ConnectionRefusedError: [Errno 111] Connection refused + +distributed/comm/tcp.py:140: CommClosedError + +The above exception was the direct cause of the following exception: + +c = +s = +a = +b = + + @gen_cluster(client=True) + async def test_client_disconnect_exception_on_cancelled_futures(c, s, a, b): + fut = c.submit(inc, 1) + await wait(fut) + + await s.close() + + with pytest.raises(FutureCancelledError, match="connection to the scheduler"): +> await fut.result() + +distributed/tests/test_client.py:8216: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +distributed/client.py:422: in _result + result = await self.client._gather([self]) +distributed/client.py:2455: in _gather + response = await future +distributed/client.py:2507: in _gather_remote + response = await retry_operation(self.scheduler.gather, keys=keys) +distributed/utils_comm.py:441: in retry_operation + return await retry( +distributed/utils_comm.py:420: in retry + return await coro() +distributed/core.py:1256: in send_recv_from_rpc + comm = await self.pool.connect(self.addr) +distributed/core.py:1485: in connect + return await self._connect(addr=addr, timeout=timeout) +distributed/core.py:1429: in _connect + comm = await connect( +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +addr = 'tcp://127.0.0.1:35317', timeout = 5, deserialize = True +handshake_overrides = None +connection_args = {'extra_conn_args': {}, 'require_encryption': False, 'ssl_context': None} +scheme = 'tcp', loc = '127.0.0.1:35317' +backend = +connector = +comm = None, time_left = .time_left at 0xffff45adb7e0> +backoff_base = 0.01 + + async def connect( + addr, timeout=None, deserialize=True, handshake_overrides=None, **connection_args + ): + """ + Connect 
to the given address (a URI such as ``tcp://127.0.0.1:1234``) + and yield a ``Comm`` object. If the connection attempt fails, it is + retried until the *timeout* is expired. + """ + if timeout is None: + timeout = dask.config.get("distributed.comm.timeouts.connect") + timeout = parse_timedelta(timeout, default="seconds") + + scheme, loc = parse_address(addr) + backend = registry.get_backend(scheme) + connector = backend.get_connector() + comm = None + + start = time() + + def time_left(): + deadline = start + timeout + return max(0, deadline - time()) + + backoff_base = 0.01 + attempt = 0 + logger.debug("Establishing connection to %s", loc) + # Prefer multiple small attempts than one long attempt. This should protect + # primarily from DNS race conditions + # gh3104, gh4176, gh4167 + intermediate_cap = timeout / 5 + active_exception = None + while time_left() > 0: + try: + comm = await wait_for( + connector.connect(loc, deserialize=deserialize, **connection_args), + timeout=min(intermediate_cap, time_left()), + ) + break + except FatalCommClosedError: + raise + # Note: CommClosed inherits from OSError + except (asyncio.TimeoutError, OSError) as exc: + active_exception = exc + + # As described above, the intermediate timeout is used to distributed + # initial, bulk connect attempts homogeneously. In particular with + # the jitter upon retries we should not be worred about overloading + # any more DNS servers + intermediate_cap = timeout + # FullJitter see https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ + + upper_cap = min(time_left(), backoff_base * (2**attempt)) + backoff = random.uniform(0, upper_cap) + attempt += 1 + logger.debug( + "Could not connect to %s, waiting for %s before retrying", loc, backoff + ) + await asyncio.sleep(backoff) + else: +> raise OSError( + f"Timed out trying to connect to {addr} after {timeout} s" + ) from active_exception +E OSError: Timed out trying to connect to tcp://127.0.0.1:35317 after 5 s + +distributed/comm/core.py:368: OSError +----------------------------- Captured stderr call ----------------------------- +2025-03-12 03:45:59,056 - distributed.scheduler - INFO - State start +2025-03-12 03:45:59,077 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:35317 +2025-03-12 03:45:59,091 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41443/status +2025-03-12 03:45:59,092 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 03:45:59,121 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:43209 +2025-03-12 03:45:59,131 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:43209 +2025-03-12 03:45:59,132 - distributed.worker - INFO - Worker name: 0 +2025-03-12 03:45:59,133 - distributed.worker - INFO - dashboard at: 127.0.0.1:35819 +2025-03-12 03:45:59,158 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:35317 +2025-03-12 03:45:59,159 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:45:59,160 - distributed.worker - INFO - Threads: 1 +2025-03-12 03:45:59,161 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 03:45:59,179 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-lzlqp4o6 +2025-03-12 03:45:59,180 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:45:59,198 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:33443 +2025-03-12 03:45:59,200 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:33443 
+2025-03-12 03:45:59,200 - distributed.worker - INFO - Worker name: 1 +2025-03-12 03:45:59,201 - distributed.worker - INFO - dashboard at: 127.0.0.1:34653 +2025-03-12 03:45:59,219 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:35317 +2025-03-12 03:45:59,220 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:45:59,221 - distributed.worker - INFO - Threads: 2 +2025-03-12 03:45:59,242 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 03:45:59,243 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-xz1xry7_ +2025-03-12 03:45:59,244 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:45:59,323 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:43209 name: 0 +2025-03-12 03:45:59,341 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:43209 +2025-03-12 03:45:59,342 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:36788 +2025-03-12 03:45:59,343 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:33443 name: 1 +2025-03-12 03:45:59,357 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:33443 +2025-03-12 03:45:59,359 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:36798 +2025-03-12 03:45:59,359 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 03:45:59,361 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 03:45:59,363 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:35317 +2025-03-12 03:45:59,364 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:45:59,365 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:35317 +2025-03-12 03:45:59,366 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 03:45:59,367 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:35317 +2025-03-12 03:45:59,367 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:35317 +2025-03-12 03:45:59,384 - distributed.scheduler - INFO - Receive client connection: Client-2a2d2002-fe7f-11ef-9228-ad3bd1b43ef1 +2025-03-12 03:45:59,398 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:36812 +2025-03-12 03:45:59,435 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown +2025-03-12 03:45:59,436 - distributed.scheduler - INFO - Scheduler closing all comms +2025-03-12 03:45:59,438 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:43209. Reason: scheduler-close +2025-03-12 03:45:59,440 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:33443. Reason: scheduler-close +2025-03-12 03:45:59,441 - distributed.core - INFO - Connection to tcp://127.0.0.1:36788 has been closed. +2025-03-12 03:45:59,441 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:43209 name: 0 (stimulus_id='handle-worker-cleanup-1741700759.4414027') +2025-03-12 03:45:59,451 - distributed.core - INFO - Connection to tcp://127.0.0.1:36798 has been closed. 
+2025-03-12 03:45:59,451 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:33443 name: 1 (stimulus_id='handle-worker-cleanup-1741700759.4511564') +2025-03-12 03:45:59,452 - distributed.scheduler - WARNING - Removing worker 'tcp://127.0.0.1:33443' caused the cluster to lose already computed task(s), which will be recomputed elsewhere: {'inc-79bd6ad8fddafb0a73473b9d4cf597a2'} (stimulus_id='handle-worker-cleanup-1741700759.4511564') +2025-03-12 03:45:59,453 - distributed.scheduler - INFO - Lost all workers +2025-03-12 03:45:59,458 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 03:45:59,459 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 03:45:59,466 - distributed.batched - INFO - Batched Comm Closed Client local=tcp://127.0.0.1:35317 remote=tcp://127.0.0.1:36812> +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send + nbytes = yield coro + ^^^^^^^^^^ + File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run + value = future.result() + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 262, in write + raise CommClosedError() +distributed.comm.core.CommClosedError +2025-03-12 03:45:59,467 - distributed.batched - INFO - Batched Comm Closed Scheduler local=tcp://127.0.0.1:36788 remote=tcp://127.0.0.1:35317> +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 297, in write + raise StreamClosedError() +tornado.iostream.StreamClosedError: Stream is closed + +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send + nbytes = yield coro + ^^^^^^^^^^ + File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run + value = future.result() + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 307, in write + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc}") from exc +distributed.comm.core.CommClosedError: in Scheduler local=tcp://127.0.0.1:36788 remote=tcp://127.0.0.1:35317>: Stream is closed +2025-03-12 03:45:59,468 - distributed.batched - INFO - Batched Comm Closed Scheduler local=tcp://127.0.0.1:36798 remote=tcp://127.0.0.1:35317> +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 297, in write + raise StreamClosedError() +tornado.iostream.StreamClosedError: Stream is closed + +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send + nbytes = yield coro + ^^^^^^^^^^ + File "/usr/lib/python3/dist-packages/tornado/gen.py", line 
766, in run + value = future.result() + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 307, in write + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc}") from exc +distributed.comm.core.CommClosedError: in Scheduler local=tcp://127.0.0.1:36798 remote=tcp://127.0.0.1:35317>: Stream is closed +2025-03-12 03:45:59,494 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:35317; closing. +2025-03-12 03:45:59,495 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:35317; closing. +2025-03-12 03:46:04,609 - distributed.client - ERROR - +ConnectionRefusedError: [Errno 111] Connection refused + +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect + comm = await wait_for( + ^^^^^^^^^^^^^^^ + ...<2 lines>... + ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for + return await fut + ^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 559, in connect + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 140, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc +distributed.comm.core.CommClosedError: in : ConnectionRefusedError: [Errno 111] Connection refused + +During handling of the above exception, another exception occurred: + +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 809, in wrapper + return await func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/client.py", line 1550, in _reconnect + await self._ensure_connected(timeout=timeout) + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/client.py", line 1580, in _ensure_connected + comm = await connect( + ^^^^^^^^^^^^^^ + self.scheduler.address, timeout=timeout, **self.connection_args + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 366, in connect + await asyncio.sleep(backoff) + File "/usr/lib/python3.13/asyncio/tasks.py", line 718, in sleep + return await future + ^^^^^^^^^^^^ +asyncio.exceptions.CancelledError __________________________________ test_nanny __________________________________ -fut = , timeout = 0 +fut = , timeout = 0 async def wait_for(fut: Awaitable[T], timeout: float) -> T: async with asyncio.timeout(timeout): @@ -6673,10 +7132,10 @@ stream = await self.client.connect( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ -self = -host = '127.0.0.1', port = 43969, af = -ssl_options = -max_buffer_size = 31544631296.0, source_ip = None, source_port = None +self = +host = '127.0.0.1', port = 34571, af = +ssl_options = +max_buffer_size = 31544633344.0, source_ip = None, source_port = None timeout = None async def connect( @@ -6772,7 +7231,7 @@ self = exc_type = -exc_val = CancelledError(), exc_tb = +exc_val = CancelledError(), exc_tb = async def __aexit__( self, @@ -6857,7 +7316,7 @@ During handling of the above exception, another exception occurred: -fut = ._..test_func..async_fn at 0xffff477f51c0> +fut = ._..test_func..async_fn at 0xffff2b0ed1c0> timeout = 60 async def wait_for(fut: Awaitable[T], timeout: float) -> T: @@ -6929,7 +7388,7 @@ self = exc_type = -exc_val = CancelledError(), exc_tb = +exc_val = CancelledError(), exc_tb = async def __aexit__( self, @@ -6955,71 +7414,71 @@ /usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError ----------------------------- Captured stderr call ----------------------------- -2026-04-13 07:25:23,696 - distributed.scheduler - INFO - State start -2026-04-13 07:25:23,701 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45931 -2026-04-13 07:25:23,703 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36543/status -2026-04-13 07:25:23,705 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:25:23,736 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:39503' -2026-04-13 07:25:23,738 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:44279' -2026-04-13 07:25:24,444 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089262592 due to system memory limit of 58.76 GiB -2026-04-13 07:25:24,465 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:33793 -2026-04-13 07:25:24,466 - distributed.worker - INFO - Listening to: tls://127.0.0.1:33793 -2026-04-13 07:25:24,466 - distributed.worker - INFO - Worker name: 1 -2026-04-13 07:25:24,466 - distributed.worker - INFO - dashboard at: 127.0.0.1:39209 -2026-04-13 07:25:24,466 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:45931 -2026-04-13 07:25:24,466 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:25:24,466 - distributed.worker - INFO - Threads: 2 -2026-04-13 07:25:24,466 - distributed.worker - INFO - Memory: 58.76 GiB -2026-04-13 07:25:24,466 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-7i0c81re -2026-04-13 07:25:24,466 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:25:24,546 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089262592 due to system memory limit of 58.76 GiB -2026-04-13 07:25:24,567 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:44521 -2026-04-13 07:25:24,567 - distributed.worker - INFO - Listening to: tls://127.0.0.1:44521 -2026-04-13 07:25:24,567 - distributed.worker - INFO - Worker name: 0 -2026-04-13 07:25:24,567 - distributed.worker - INFO - dashboard at: 127.0.0.1:42689 -2026-04-13 07:25:24,567 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:45931 -2026-04-13 07:25:24,567 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:25:24,567 - distributed.worker - INFO - Threads: 1 -2026-04-13 07:25:24,567 - distributed.worker - INFO - Memory: 58.76 GiB -2026-04-13 07:25:24,567 - distributed.worker - INFO - Local Directory: 
/tmp/dask-scratch-space/worker-274vkejb -2026-04-13 07:25:24,567 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:25:24,808 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:33793 name: 1 -2026-04-13 07:25:24,859 - distributed.worker - INFO - Starting Worker plugin shuffle -2026-04-13 07:25:24,860 - distributed.worker - INFO - Registered to: tls://127.0.0.1:45931 -2026-04-13 07:25:24,860 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:25:24,861 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:45931 -2026-04-13 07:25:24,858 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:33793 -2026-04-13 07:25:24,861 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:41676 -2026-04-13 07:25:24,914 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:44521 name: 0 -2026-04-13 07:25:24,965 - distributed.worker - INFO - Starting Worker plugin shuffle -2026-04-13 07:25:24,965 - distributed.worker - INFO - Registered to: tls://127.0.0.1:45931 -2026-04-13 07:25:24,966 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:25:24,966 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:45931 -2026-04-13 07:25:24,964 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:44521 -2026-04-13 07:25:24,967 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:41688 -2026-04-13 07:25:24,968 - distributed.core - INFO - Connection to tls://127.0.0.1:41688 has been closed. -2026-04-13 07:25:24,968 - distributed.core - INFO - Connection to tls://127.0.0.1:45931 has been closed. -2026-04-13 07:25:24,968 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:44521. Reason: worker-handle-scheduler-connection-broken -2026-04-13 07:25:24,968 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:44521 name: 0 (stimulus_id='handle-worker-cleanup-1776108324.9681966') -2026-04-13 07:25:24,994 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:39503'. Reason: worker-handle-scheduler-connection-broken -2026-04-13 07:25:24,996 - distributed.worker - INFO - Removing Worker plugin shuffle -2026-04-13 07:25:24,999 - distributed.nanny - INFO - Worker closed -2026-04-13 07:25:27,002 - distributed.nanny - ERROR - Worker process died unexpectedly -2026-04-13 07:25:27,297 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39503'. Reason: nanny-close-gracefully -2026-04-13 07:25:27,298 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39503' closed. -2026-04-13 07:25:54,986 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44279'. Reason: nanny-close -2026-04-13 07:25:54,987 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close -2026-04-13 07:25:54,998 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:33793. Reason: nanny-close -2026-04-13 07:25:54,999 - distributed.worker - INFO - Removing Worker plugin shuffle -2026-04-13 07:25:55,000 - distributed.core - INFO - Connection to tls://127.0.0.1:45931 has been closed. -2026-04-13 07:25:55,002 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:41676; closing. 
-2026-04-13 07:25:55,002 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:33793 name: 1 (stimulus_id='handle-worker-cleanup-1776108355.002573') -2026-04-13 07:25:55,004 - distributed.scheduler - INFO - Lost all workers -2026-04-13 07:25:55,023 - distributed.nanny - INFO - Worker closed -2026-04-13 07:25:55,427 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44279' closed. -2026-04-13 07:25:55,427 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown -2026-04-13 07:25:55,429 - distributed.scheduler - INFO - Scheduler closing all comms -2026-04-13 07:25:55,432 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying +2025-03-12 04:07:46,934 - distributed.scheduler - INFO - State start +2025-03-12 04:07:46,961 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40939 +2025-03-12 04:07:46,971 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40381/status +2025-03-12 04:07:46,986 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:07:47,108 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:33763' +2025-03-12 04:07:47,142 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:41665' +2025-03-12 04:07:48,854 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089266688 due to system memory limit of 58.76 GiB +2025-03-12 04:07:48,915 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:41313 +2025-03-12 04:07:48,915 - distributed.worker - INFO - Listening to: tls://127.0.0.1:41313 +2025-03-12 04:07:48,915 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:07:48,915 - distributed.worker - INFO - dashboard at: 127.0.0.1:37335 +2025-03-12 04:07:48,915 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:40939 +2025-03-12 04:07:48,915 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:07:48,915 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:07:48,915 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:07:48,915 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-g8dwuh4n +2025-03-12 04:07:48,915 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:07:49,221 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089266688 due to system memory limit of 58.76 GiB +2025-03-12 04:07:49,298 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:36351 +2025-03-12 04:07:49,298 - distributed.worker - INFO - Listening to: tls://127.0.0.1:36351 +2025-03-12 04:07:49,298 - distributed.worker - INFO - Worker name: 1 +2025-03-12 04:07:49,298 - distributed.worker - INFO - dashboard at: 127.0.0.1:40955 +2025-03-12 04:07:49,298 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:40939 +2025-03-12 04:07:49,298 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:07:49,298 - distributed.worker - INFO - Threads: 2 +2025-03-12 04:07:49,298 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:07:49,298 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-hhch1oib +2025-03-12 04:07:49,298 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:07:49,720 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:41313 name: 0 +2025-03-12 04:07:49,778 - distributed.scheduler - INFO - Starting worker compute 
stream, tls://127.0.0.1:41313 +2025-03-12 04:07:49,780 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:50756 +2025-03-12 04:07:49,781 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:07:49,782 - distributed.worker - INFO - Registered to: tls://127.0.0.1:40939 +2025-03-12 04:07:49,782 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:07:49,814 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40939 +2025-03-12 04:07:50,215 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:36351 name: 1 +2025-03-12 04:07:50,298 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:07:50,299 - distributed.worker - INFO - Registered to: tls://127.0.0.1:40939 +2025-03-12 04:07:50,299 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:07:50,296 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:36351 +2025-03-12 04:07:50,315 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:50764 +2025-03-12 04:07:50,315 - distributed.core - INFO - Connection to tls://127.0.0.1:50764 has been closed. +2025-03-12 04:07:50,326 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40939 +2025-03-12 04:07:50,326 - distributed.core - INFO - Connection to tls://127.0.0.1:40939 has been closed. +2025-03-12 04:07:50,326 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:36351. Reason: worker-handle-scheduler-connection-broken +2025-03-12 04:07:50,316 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:36351 name: 1 (stimulus_id='handle-worker-cleanup-1741702070.3160896') +2025-03-12 04:07:50,379 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:41665'. Reason: worker-handle-scheduler-connection-broken +2025-03-12 04:07:50,380 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:07:50,406 - distributed.nanny - INFO - Worker closed +2025-03-12 04:07:52,450 - distributed.nanny - ERROR - Worker process died unexpectedly +2025-03-12 04:07:53,518 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41665'. Reason: nanny-close-gracefully +2025-03-12 04:07:53,518 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41665' closed. +2025-03-12 04:08:20,370 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33763'. Reason: nanny-close +2025-03-12 04:08:20,370 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close +2025-03-12 04:08:20,382 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:41313. Reason: nanny-close +2025-03-12 04:08:20,382 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:08:20,384 - distributed.core - INFO - Connection to tls://127.0.0.1:40939 has been closed. +2025-03-12 04:08:20,386 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:50756; closing. +2025-03-12 04:08:20,387 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:41313 name: 0 (stimulus_id='handle-worker-cleanup-1741702100.3871908') +2025-03-12 04:08:20,390 - distributed.nanny - INFO - Worker closed +2025-03-12 04:08:20,389 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:08:21,162 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33763' closed. +2025-03-12 04:08:21,162 - distributed.scheduler - INFO - Closing scheduler. 
Reason: unknown +2025-03-12 04:08:21,165 - distributed.scheduler - INFO - Scheduler closing all comms +2025-03-12 04:08:21,171 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory s, ws = await start_cluster( @@ -7030,17 +7489,17 @@ File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster raise TimeoutError("Cluster creation timeout") TimeoutError: Cluster creation timeout -2026-04-13 07:25:56,440 - distributed.scheduler - INFO - State start -2026-04-13 07:25:56,446 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35037 -2026-04-13 07:25:56,456 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37885/status -2026-04-13 07:25:56,458 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:25:56,481 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40941'. Reason: failure-to-start- -2026-04-13 07:25:56,481 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40941' closed. -2026-04-13 07:25:56,482 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32897'. Reason: failure-to-start- -2026-04-13 07:25:56,482 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32897' closed. -2026-04-13 07:25:56,482 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35037': TLS handshake failed with remote 'tls://127.0.0.1:33456': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:56,482 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35037': TLS handshake failed with remote 'tls://127.0.0.1:33458': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:56,482 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:22,175 - distributed.scheduler - INFO - State start +2025-03-12 04:08:22,201 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40623 +2025-03-12 04:08:22,204 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43995/status +2025-03-12 04:08:22,214 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:22,245 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40415'. Reason: failure-to-start- +2025-03-12 04:08:22,245 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40415' closed. +2025-03-12 04:08:22,245 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35243'. Reason: failure-to-start- +2025-03-12 04:08:22,245 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35243' closed. 
+2025-03-12 04:08:22,262 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40623': TLS handshake failed with remote 'tls://127.0.0.1:38774': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:22,262 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40623': TLS handshake failed with remote 'tls://127.0.0.1:38780': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:22,262 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7109,17 +7568,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:25:57,488 - distributed.scheduler - INFO - State start -2026-04-13 07:25:57,498 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45817 -2026-04-13 07:25:57,504 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41305/status -2026-04-13 07:25:57,506 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:25:57,523 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43245'. Reason: failure-to-start- -2026-04-13 07:25:57,523 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43245' closed. -2026-04-13 07:25:57,523 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45461'. Reason: failure-to-start- -2026-04-13 07:25:57,523 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45461' closed. -2026-04-13 07:25:57,524 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45817': TLS handshake failed with remote 'tls://127.0.0.1:34346': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:57,524 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45817': TLS handshake failed with remote 'tls://127.0.0.1:34362': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:57,524 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:23,267 - distributed.scheduler - INFO - State start +2025-03-12 04:08:23,281 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39931 +2025-03-12 04:08:23,296 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38277/status +2025-03-12 04:08:23,310 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:23,341 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40867'. Reason: failure-to-start- +2025-03-12 04:08:23,341 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40867' closed. +2025-03-12 04:08:23,341 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40489'. Reason: failure-to-start- +2025-03-12 04:08:23,341 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40489' closed. 
+2025-03-12 04:08:23,354 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39931': TLS handshake failed with remote 'tls://127.0.0.1:56316': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:23,354 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39931': TLS handshake failed with remote 'tls://127.0.0.1:56322': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:23,354 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7188,17 +7647,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:25:58,536 - distributed.scheduler - INFO - State start -2026-04-13 07:25:58,546 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37259 -2026-04-13 07:25:58,552 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33843/status -2026-04-13 07:25:58,553 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:25:58,577 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36573'. Reason: failure-to-start- -2026-04-13 07:25:58,577 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36573' closed. -2026-04-13 07:25:58,577 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33645'. Reason: failure-to-start- -2026-04-13 07:25:58,577 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33645' closed. -2026-04-13 07:25:58,578 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37259': TLS handshake failed with remote 'tls://127.0.0.1:40134': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:58,578 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37259': TLS handshake failed with remote 'tls://127.0.0.1:40148': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:58,578 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:24,375 - distributed.scheduler - INFO - State start +2025-03-12 04:08:24,405 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33103 +2025-03-12 04:08:24,423 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40627/status +2025-03-12 04:08:24,446 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:24,488 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36717'. Reason: failure-to-start- +2025-03-12 04:08:24,489 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36717' closed. +2025-03-12 04:08:24,489 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44849'. Reason: failure-to-start- +2025-03-12 04:08:24,489 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44849' closed. 
+2025-03-12 04:08:24,489 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33103': TLS handshake failed with remote 'tls://127.0.0.1:55006': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:24,497 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33103': TLS handshake failed with remote 'tls://127.0.0.1:55010': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:24,498 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7267,17 +7726,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:25:59,588 - distributed.scheduler - INFO - State start -2026-04-13 07:25:59,594 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41673 -2026-04-13 07:25:59,604 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41725/status -2026-04-13 07:25:59,606 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:25:59,624 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43917'. Reason: failure-to-start- -2026-04-13 07:25:59,624 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43917' closed. -2026-04-13 07:25:59,624 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45945'. Reason: failure-to-start- -2026-04-13 07:25:59,624 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45945' closed. -2026-04-13 07:25:59,625 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41673': TLS handshake failed with remote 'tls://127.0.0.1:54002': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:59,625 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41673': TLS handshake failed with remote 'tls://127.0.0.1:54010': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:25:59,625 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:25,503 - distributed.scheduler - INFO - State start +2025-03-12 04:08:25,517 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46419 +2025-03-12 04:08:25,519 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:32967/status +2025-03-12 04:08:25,530 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:25,552 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39149'. Reason: failure-to-start- +2025-03-12 04:08:25,553 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39149' closed. +2025-03-12 04:08:25,553 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33433'. Reason: failure-to-start- +2025-03-12 04:08:25,553 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33433' closed. 
+2025-03-12 04:08:25,553 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46419': TLS handshake failed with remote 'tls://127.0.0.1:57084': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:25,553 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46419': TLS handshake failed with remote 'tls://127.0.0.1:57086': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:25,553 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7346,17 +7805,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:00,636 - distributed.scheduler - INFO - State start -2026-04-13 07:26:00,648 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42967 -2026-04-13 07:26:00,650 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34321/status -2026-04-13 07:26:00,660 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:00,678 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41833'. Reason: failure-to-start- -2026-04-13 07:26:00,682 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41833' closed. -2026-04-13 07:26:00,683 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43145'. Reason: failure-to-start- -2026-04-13 07:26:00,683 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43145' closed. -2026-04-13 07:26:00,683 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42967': TLS handshake failed with remote 'tls://127.0.0.1:48668': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:00,683 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42967': TLS handshake failed with remote 'tls://127.0.0.1:48670': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:00,683 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:26,559 - distributed.scheduler - INFO - State start +2025-03-12 04:08:26,588 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45051 +2025-03-12 04:08:26,599 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40301/status +2025-03-12 04:08:26,601 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:26,644 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44769'. Reason: failure-to-start- +2025-03-12 04:08:26,644 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44769' closed. +2025-03-12 04:08:26,644 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45385'. Reason: failure-to-start- +2025-03-12 04:08:26,644 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45385' closed. 
+2025-03-12 04:08:26,645 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45051': TLS handshake failed with remote 'tls://127.0.0.1:56150': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:26,645 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45051': TLS handshake failed with remote 'tls://127.0.0.1:56164': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:26,645 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7425,17 +7884,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:01,688 - distributed.scheduler - INFO - State start -2026-04-13 07:26:01,702 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42441 -2026-04-13 07:26:01,712 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46555/status -2026-04-13 07:26:01,713 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:01,736 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41607'. Reason: failure-to-start- -2026-04-13 07:26:01,736 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41607' closed. -2026-04-13 07:26:01,736 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45693'. Reason: failure-to-start- -2026-04-13 07:26:01,736 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45693' closed. -2026-04-13 07:26:01,736 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42441': TLS handshake failed with remote 'tls://127.0.0.1:41084': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:01,737 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42441': TLS handshake failed with remote 'tls://127.0.0.1:41090': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:01,737 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:27,659 - distributed.scheduler - INFO - State start +2025-03-12 04:08:27,673 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46387 +2025-03-12 04:08:27,683 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37827/status +2025-03-12 04:08:27,698 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:27,733 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38423'. Reason: failure-to-start- +2025-03-12 04:08:27,742 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38423' closed. +2025-03-12 04:08:27,742 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41089'. Reason: failure-to-start- +2025-03-12 04:08:27,742 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41089' closed. 
+2025-03-12 04:08:27,743 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46387': TLS handshake failed with remote 'tls://127.0.0.1:35604': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:27,743 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46387': TLS handshake failed with remote 'tls://127.0.0.1:35620': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:27,743 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7504,17 +7963,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:02,752 - distributed.scheduler - INFO - State start -2026-04-13 07:26:02,809 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43227 -2026-04-13 07:26:02,819 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45231/status -2026-04-13 07:26:02,821 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:02,840 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42615'. Reason: failure-to-start- -2026-04-13 07:26:02,840 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42615' closed. -2026-04-13 07:26:02,840 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43001'. Reason: failure-to-start- -2026-04-13 07:26:02,840 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43001' closed. -2026-04-13 07:26:02,841 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43227': TLS handshake failed with remote 'tls://127.0.0.1:46920': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:02,841 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43227': TLS handshake failed with remote 'tls://127.0.0.1:46932': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:02,841 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:28,751 - distributed.scheduler - INFO - State start +2025-03-12 04:08:28,789 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39165 +2025-03-12 04:08:28,804 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41819/status +2025-03-12 04:08:28,822 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:28,878 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40439'. Reason: failure-to-start- +2025-03-12 04:08:28,879 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40439' closed. +2025-03-12 04:08:28,879 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44967'. Reason: failure-to-start- +2025-03-12 04:08:28,879 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44967' closed. 
+2025-03-12 04:08:28,879 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39165': TLS handshake failed with remote 'tls://127.0.0.1:47774': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:28,879 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39165': TLS handshake failed with remote 'tls://127.0.0.1:47786': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:28,880 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7583,17 +8042,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:03,856 - distributed.scheduler - INFO - State start -2026-04-13 07:26:03,866 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46499 -2026-04-13 07:26:03,872 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36013/status -2026-04-13 07:26:03,874 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:03,892 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44893'. Reason: failure-to-start- -2026-04-13 07:26:03,892 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44893' closed. -2026-04-13 07:26:03,892 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33785'. Reason: failure-to-start- -2026-04-13 07:26:03,892 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33785' closed. -2026-04-13 07:26:03,893 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46499': TLS handshake failed with remote 'tls://127.0.0.1:45948': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:03,893 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46499': TLS handshake failed with remote 'tls://127.0.0.1:45958': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:03,893 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:29,903 - distributed.scheduler - INFO - State start +2025-03-12 04:08:29,945 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36641 +2025-03-12 04:08:29,948 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38249/status +2025-03-12 04:08:29,966 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:30,043 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35203'. Reason: failure-to-start- +2025-03-12 04:08:30,043 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35203' closed. +2025-03-12 04:08:30,043 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41539'. Reason: failure-to-start- +2025-03-12 04:08:30,043 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41539' closed. 
+2025-03-12 04:08:30,045 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36641': TLS handshake failed with remote 'tls://127.0.0.1:40926': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:30,045 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36641': TLS handshake failed with remote 'tls://127.0.0.1:40938': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:30,045 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7662,17 +8121,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:04,908 - distributed.scheduler - INFO - State start -2026-04-13 07:26:04,922 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41145 -2026-04-13 07:26:04,924 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44819/status -2026-04-13 07:26:04,925 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:04,959 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39423'. Reason: failure-to-start- -2026-04-13 07:26:04,959 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39423' closed. -2026-04-13 07:26:04,960 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37375'. Reason: failure-to-start- -2026-04-13 07:26:04,960 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37375' closed. -2026-04-13 07:26:04,960 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41145': TLS handshake failed with remote 'tls://127.0.0.1:49150': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:04,960 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41145': TLS handshake failed with remote 'tls://127.0.0.1:49162': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:04,960 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:31,068 - distributed.scheduler - INFO - State start +2025-03-12 04:08:31,110 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37791 +2025-03-12 04:08:31,112 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35749/status +2025-03-12 04:08:31,138 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:31,227 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45309'. Reason: failure-to-start- +2025-03-12 04:08:31,228 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45309' closed. +2025-03-12 04:08:31,228 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34423'. Reason: failure-to-start- +2025-03-12 04:08:31,228 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34423' closed. 
+2025-03-12 04:08:31,229 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37791': TLS handshake failed with remote 'tls://127.0.0.1:47984': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:31,238 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37791': TLS handshake failed with remote 'tls://127.0.0.1:47992': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:31,238 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7741,17 +8200,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:05,966 - distributed.scheduler - INFO - State start -2026-04-13 07:26:05,984 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43235 -2026-04-13 07:26:05,994 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39039/status -2026-04-13 07:26:05,996 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:06,033 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34509'. Reason: failure-to-start- -2026-04-13 07:26:06,033 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34509' closed. -2026-04-13 07:26:06,033 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37447'. Reason: failure-to-start- -2026-04-13 07:26:06,034 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37447' closed. -2026-04-13 07:26:06,043 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43235': TLS handshake failed with remote 'tls://127.0.0.1:60614': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:06,043 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43235': TLS handshake failed with remote 'tls://127.0.0.1:60628': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:06,043 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:32,243 - distributed.scheduler - INFO - State start +2025-03-12 04:08:32,273 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36117 +2025-03-12 04:08:32,276 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41005/status +2025-03-12 04:08:32,298 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:32,351 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45543'. Reason: failure-to-start- +2025-03-12 04:08:32,351 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45543' closed. +2025-03-12 04:08:32,351 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34221'. Reason: failure-to-start- +2025-03-12 04:08:32,352 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34221' closed. 
+2025-03-12 04:08:32,352 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36117': TLS handshake failed with remote 'tls://127.0.0.1:55488': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:32,352 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36117': TLS handshake failed with remote 'tls://127.0.0.1:55494': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:32,352 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7820,17 +8279,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:07,048 - distributed.scheduler - INFO - State start -2026-04-13 07:26:07,063 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39349 -2026-04-13 07:26:07,065 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42099/status -2026-04-13 07:26:07,072 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:07,098 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35909'. Reason: failure-to-start- -2026-04-13 07:26:07,099 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35909' closed. -2026-04-13 07:26:07,099 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36507'. Reason: failure-to-start- -2026-04-13 07:26:07,099 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36507' closed. -2026-04-13 07:26:07,099 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39349': TLS handshake failed with remote 'tls://127.0.0.1:60808': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:07,100 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39349': TLS handshake failed with remote 'tls://127.0.0.1:60818': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:07,100 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:33,368 - distributed.scheduler - INFO - State start +2025-03-12 04:08:33,410 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41391 +2025-03-12 04:08:33,412 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45399/status +2025-03-12 04:08:33,430 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:33,539 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33563'. Reason: failure-to-start- +2025-03-12 04:08:33,539 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33563' closed. +2025-03-12 04:08:33,539 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41337'. Reason: failure-to-start- +2025-03-12 04:08:33,540 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41337' closed. 
+2025-03-12 04:08:33,540 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41391': TLS handshake failed with remote 'tls://127.0.0.1:55526': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:33,541 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41391': TLS handshake failed with remote 'tls://127.0.0.1:55528': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:33,550 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7899,17 +8358,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:08,108 - distributed.scheduler - INFO - State start -2026-04-13 07:26:08,114 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45893 -2026-04-13 07:26:08,123 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35973/status -2026-04-13 07:26:08,125 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:08,151 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41579'. Reason: failure-to-start- -2026-04-13 07:26:08,151 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41579' closed. -2026-04-13 07:26:08,151 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35683'. Reason: failure-to-start- -2026-04-13 07:26:08,152 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35683' closed. -2026-04-13 07:26:08,152 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45893': TLS handshake failed with remote 'tls://127.0.0.1:37820': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:08,152 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45893': TLS handshake failed with remote 'tls://127.0.0.1:37834': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:08,152 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:34,555 - distributed.scheduler - INFO - State start +2025-03-12 04:08:34,578 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41359 +2025-03-12 04:08:34,581 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33137/status +2025-03-12 04:08:34,595 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:34,640 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34901'. Reason: failure-to-start- +2025-03-12 04:08:34,640 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34901' closed. +2025-03-12 04:08:34,640 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33913'. Reason: failure-to-start- +2025-03-12 04:08:34,640 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33913' closed. 
+2025-03-12 04:08:34,641 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41359': TLS handshake failed with remote 'tls://127.0.0.1:56816': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:34,641 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41359': TLS handshake failed with remote 'tls://127.0.0.1:56828': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:34,641 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -7978,17 +8437,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:09,165 - distributed.scheduler - INFO - State start -2026-04-13 07:26:09,179 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35109 -2026-04-13 07:26:09,181 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38845/status -2026-04-13 07:26:09,183 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:09,212 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34493'. Reason: failure-to-start- -2026-04-13 07:26:09,213 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34493' closed. -2026-04-13 07:26:09,213 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45489'. Reason: failure-to-start- -2026-04-13 07:26:09,213 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45489' closed. -2026-04-13 07:26:09,213 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35109': TLS handshake failed with remote 'tls://127.0.0.1:39754': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:09,214 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35109': TLS handshake failed with remote 'tls://127.0.0.1:39758': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:09,214 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:35,660 - distributed.scheduler - INFO - State start +2025-03-12 04:08:35,686 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43273 +2025-03-12 04:08:35,688 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46183/status +2025-03-12 04:08:35,706 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:35,773 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36121'. Reason: failure-to-start- +2025-03-12 04:08:35,773 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36121' closed. +2025-03-12 04:08:35,782 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36331'. Reason: failure-to-start- +2025-03-12 04:08:35,782 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36331' closed. 
+2025-03-12 04:08:35,783 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43273': TLS handshake failed with remote 'tls://127.0.0.1:47648': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:35,783 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43273': TLS handshake failed with remote 'tls://127.0.0.1:47660': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:35,783 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8057,17 +8516,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:10,225 - distributed.scheduler - INFO - State start -2026-04-13 07:26:10,239 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44705 -2026-04-13 07:26:10,241 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38509/status -2026-04-13 07:26:10,247 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:10,270 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34629'. Reason: failure-to-start- -2026-04-13 07:26:10,271 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34629' closed. -2026-04-13 07:26:10,271 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35419'. Reason: failure-to-start- -2026-04-13 07:26:10,271 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35419' closed. -2026-04-13 07:26:10,271 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44705': TLS handshake failed with remote 'tls://127.0.0.1:43946': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:10,272 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44705': TLS handshake failed with remote 'tls://127.0.0.1:43958': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:10,272 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:36,788 - distributed.scheduler - INFO - State start +2025-03-12 04:08:36,802 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39837 +2025-03-12 04:08:36,804 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36845/status +2025-03-12 04:08:36,810 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:36,836 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44573'. Reason: failure-to-start- +2025-03-12 04:08:36,836 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44573' closed. +2025-03-12 04:08:36,836 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37787'. Reason: failure-to-start- +2025-03-12 04:08:36,837 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37787' closed. 
+2025-03-12 04:08:36,837 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39837': TLS handshake failed with remote 'tls://127.0.0.1:45194': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:36,837 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39837': TLS handshake failed with remote 'tls://127.0.0.1:45196': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:36,837 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8136,17 +8595,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:11,276 - distributed.scheduler - INFO - State start -2026-04-13 07:26:11,286 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42795 -2026-04-13 07:26:11,292 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45235/status -2026-04-13 07:26:11,294 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:11,322 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40087'. Reason: failure-to-start- -2026-04-13 07:26:11,326 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40087' closed. -2026-04-13 07:26:11,327 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38233'. Reason: failure-to-start- -2026-04-13 07:26:11,327 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38233' closed. -2026-04-13 07:26:11,327 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42795': TLS handshake failed with remote 'tls://127.0.0.1:40746': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:11,328 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42795': TLS handshake failed with remote 'tls://127.0.0.1:40748': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:11,328 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:37,847 - distributed.scheduler - INFO - State start +2025-03-12 04:08:37,882 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44981 +2025-03-12 04:08:37,884 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35783/status +2025-03-12 04:08:37,898 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:37,957 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45033'. Reason: failure-to-start- +2025-03-12 04:08:37,957 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45033' closed. +2025-03-12 04:08:37,957 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39073'. Reason: failure-to-start- +2025-03-12 04:08:37,957 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39073' closed. 
+2025-03-12 04:08:37,968 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44981': TLS handshake failed with remote 'tls://127.0.0.1:33692': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:37,968 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44981': TLS handshake failed with remote 'tls://127.0.0.1:33702': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:37,968 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8215,17 +8674,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:12,336 - distributed.scheduler - INFO - State start -2026-04-13 07:26:12,342 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34733 -2026-04-13 07:26:12,348 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41033/status -2026-04-13 07:26:12,350 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:12,373 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34175'. Reason: failure-to-start- -2026-04-13 07:26:12,373 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34175' closed. -2026-04-13 07:26:12,373 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35071'. Reason: failure-to-start- -2026-04-13 07:26:12,373 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35071' closed. -2026-04-13 07:26:12,374 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34733': TLS handshake failed with remote 'tls://127.0.0.1:37070': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:12,374 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34733': TLS handshake failed with remote 'tls://127.0.0.1:37072': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:12,374 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:38,991 - distributed.scheduler - INFO - State start +2025-03-12 04:08:39,014 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33553 +2025-03-12 04:08:39,016 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39655/status +2025-03-12 04:08:39,030 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:39,075 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35163'. Reason: failure-to-start- +2025-03-12 04:08:39,076 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35163' closed. +2025-03-12 04:08:39,076 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37635'. Reason: failure-to-start- +2025-03-12 04:08:39,076 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37635' closed. 
+2025-03-12 04:08:39,076 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33553': TLS handshake failed with remote 'tls://127.0.0.1:56712': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:39,077 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33553': TLS handshake failed with remote 'tls://127.0.0.1:56726': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:39,077 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8294,17 +8753,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:13,385 - distributed.scheduler - INFO - State start -2026-04-13 07:26:13,395 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42473 -2026-04-13 07:26:13,397 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33857/status -2026-04-13 07:26:13,403 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:13,428 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36357'. Reason: failure-to-start- -2026-04-13 07:26:13,429 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36357' closed. -2026-04-13 07:26:13,429 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36945'. Reason: failure-to-start- -2026-04-13 07:26:13,429 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36945' closed. -2026-04-13 07:26:13,430 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42473': TLS handshake failed with remote 'tls://127.0.0.1:39510': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:13,430 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42473': TLS handshake failed with remote 'tls://127.0.0.1:39522': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:13,430 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:40,095 - distributed.scheduler - INFO - State start +2025-03-12 04:08:40,117 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33727 +2025-03-12 04:08:40,120 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35581/status +2025-03-12 04:08:40,134 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:40,187 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36699'. Reason: failure-to-start- +2025-03-12 04:08:40,187 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36699' closed. +2025-03-12 04:08:40,187 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33763'. Reason: failure-to-start- +2025-03-12 04:08:40,187 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33763' closed. 
+2025-03-12 04:08:40,188 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33727': TLS handshake failed with remote 'tls://127.0.0.1:60556': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:40,188 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33727': TLS handshake failed with remote 'tls://127.0.0.1:60570': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:40,188 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8373,17 +8832,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:14,444 - distributed.scheduler - INFO - State start -2026-04-13 07:26:14,458 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45569 -2026-04-13 07:26:14,468 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44851/status -2026-04-13 07:26:14,470 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:14,499 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40091'. Reason: failure-to-start- -2026-04-13 07:26:14,499 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40091' closed. -2026-04-13 07:26:14,499 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40129'. Reason: failure-to-start- -2026-04-13 07:26:14,499 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40129' closed. -2026-04-13 07:26:14,500 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45569': TLS handshake failed with remote 'tls://127.0.0.1:34882': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:14,500 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45569': TLS handshake failed with remote 'tls://127.0.0.1:34894': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:14,500 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:41,204 - distributed.scheduler - INFO - State start +2025-03-12 04:08:41,226 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43131 +2025-03-12 04:08:41,228 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43521/status +2025-03-12 04:08:41,239 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:41,281 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43865'. Reason: failure-to-start- +2025-03-12 04:08:41,281 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43865' closed. +2025-03-12 04:08:41,281 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44433'. Reason: failure-to-start- +2025-03-12 04:08:41,281 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44433' closed. 
+2025-03-12 04:08:41,290 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43131': TLS handshake failed with remote 'tls://127.0.0.1:55294': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:41,290 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43131': TLS handshake failed with remote 'tls://127.0.0.1:55310': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:41,290 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8452,17 +8911,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:15,504 - distributed.scheduler - INFO - State start -2026-04-13 07:26:15,514 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43705 -2026-04-13 07:26:15,521 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40109/status -2026-04-13 07:26:15,527 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:15,563 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34723'. Reason: failure-to-start- -2026-04-13 07:26:15,564 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34723' closed. -2026-04-13 07:26:15,564 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37691'. Reason: failure-to-start- -2026-04-13 07:26:15,564 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37691' closed. -2026-04-13 07:26:15,565 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43705': TLS handshake failed with remote 'tls://127.0.0.1:51934': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:15,566 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43705': TLS handshake failed with remote 'tls://127.0.0.1:51940': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:15,566 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:42,295 - distributed.scheduler - INFO - State start +2025-03-12 04:08:42,309 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35363 +2025-03-12 04:08:42,324 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42721/status +2025-03-12 04:08:42,334 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:42,388 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35943'. Reason: failure-to-start- +2025-03-12 04:08:42,388 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35943' closed. +2025-03-12 04:08:42,389 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36533'. Reason: failure-to-start- +2025-03-12 04:08:42,389 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36533' closed. 
+2025-03-12 04:08:42,389 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35363': TLS handshake failed with remote 'tls://127.0.0.1:43628': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:42,389 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35363': TLS handshake failed with remote 'tls://127.0.0.1:43642': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:42,389 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8531,17 +8990,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:16,576 - distributed.scheduler - INFO - State start -2026-04-13 07:26:16,590 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38331 -2026-04-13 07:26:16,600 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34999/status -2026-04-13 07:26:16,602 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:16,630 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33611'. Reason: failure-to-start- -2026-04-13 07:26:16,631 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33611' closed. -2026-04-13 07:26:16,631 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40757'. Reason: failure-to-start- -2026-04-13 07:26:16,631 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40757' closed. -2026-04-13 07:26:16,632 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38331': TLS handshake failed with remote 'tls://127.0.0.1:40216': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:16,632 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38331': TLS handshake failed with remote 'tls://127.0.0.1:40220': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:16,632 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:43,404 - distributed.scheduler - INFO - State start +2025-03-12 04:08:43,426 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:32785 +2025-03-12 04:08:43,428 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33539/status +2025-03-12 04:08:43,439 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:43,481 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34713'. Reason: failure-to-start- +2025-03-12 04:08:43,481 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34713' closed. +2025-03-12 04:08:43,481 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39689'. Reason: failure-to-start- +2025-03-12 04:08:43,481 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39689' closed. 
+2025-03-12 04:08:43,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32785': TLS handshake failed with remote 'tls://127.0.0.1:45212': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:43,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32785': TLS handshake failed with remote 'tls://127.0.0.1:45216': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:43,490 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8610,17 +9069,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:17,648 - distributed.scheduler - INFO - State start -2026-04-13 07:26:17,670 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45413 -2026-04-13 07:26:17,672 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38449/status -2026-04-13 07:26:17,674 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:17,709 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33235'. Reason: failure-to-start- -2026-04-13 07:26:17,710 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33235' closed. -2026-04-13 07:26:17,710 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35217'. Reason: failure-to-start- -2026-04-13 07:26:17,710 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35217' closed. -2026-04-13 07:26:17,715 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45413': TLS handshake failed with remote 'tls://127.0.0.1:39432': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:17,715 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45413': TLS handshake failed with remote 'tls://127.0.0.1:39436': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:17,715 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:44,496 - distributed.scheduler - INFO - State start +2025-03-12 04:08:44,521 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39717 +2025-03-12 04:08:44,524 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:32965/status +2025-03-12 04:08:44,534 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:44,589 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34889'. Reason: failure-to-start- +2025-03-12 04:08:44,589 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34889' closed. +2025-03-12 04:08:44,589 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36617'. Reason: failure-to-start- +2025-03-12 04:08:44,589 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36617' closed. 
+2025-03-12 04:08:44,598 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39717': TLS handshake failed with remote 'tls://127.0.0.1:43656': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:44,598 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39717': TLS handshake failed with remote 'tls://127.0.0.1:43672': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:44,598 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8689,17 +9148,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:18,720 - distributed.scheduler - INFO - State start -2026-04-13 07:26:18,730 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34521 -2026-04-13 07:26:18,740 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44475/status -2026-04-13 07:26:18,742 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:18,787 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45267'. Reason: failure-to-start- -2026-04-13 07:26:18,788 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45267' closed. -2026-04-13 07:26:18,788 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46449'. Reason: failure-to-start- -2026-04-13 07:26:18,788 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46449' closed. -2026-04-13 07:26:18,789 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34521': TLS handshake failed with remote 'tls://127.0.0.1:44618': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:18,789 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34521': TLS handshake failed with remote 'tls://127.0.0.1:44626': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:18,789 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:45,604 - distributed.scheduler - INFO - State start +2025-03-12 04:08:45,626 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36407 +2025-03-12 04:08:45,628 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33251/status +2025-03-12 04:08:45,638 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:45,687 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38969'. Reason: failure-to-start- +2025-03-12 04:08:45,688 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38969' closed. +2025-03-12 04:08:45,688 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35489'. Reason: failure-to-start- +2025-03-12 04:08:45,688 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35489' closed. 
+2025-03-12 04:08:45,688 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36407': TLS handshake failed with remote 'tls://127.0.0.1:54112': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:45,689 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36407': TLS handshake failed with remote 'tls://127.0.0.1:54118': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:45,689 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8768,17 +9227,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:19,800 - distributed.scheduler - INFO - State start -2026-04-13 07:26:19,810 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34403 -2026-04-13 07:26:19,820 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40595/status -2026-04-13 07:26:19,822 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:19,853 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33653'. Reason: failure-to-start- -2026-04-13 07:26:19,853 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33653' closed. -2026-04-13 07:26:19,853 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43929'. Reason: failure-to-start- -2026-04-13 07:26:19,854 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43929' closed. -2026-04-13 07:26:19,858 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34403': TLS handshake failed with remote 'tls://127.0.0.1:59906': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:19,859 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34403': TLS handshake failed with remote 'tls://127.0.0.1:59914': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:19,859 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:08:46,704 - distributed.scheduler - INFO - State start +2025-03-12 04:08:46,722 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34571 +2025-03-12 04:08:46,724 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45471/status +2025-03-12 04:08:46,735 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:08:46,806 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42381'. Reason: failure-to-start- +2025-03-12 04:08:46,806 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42381' closed. +2025-03-12 04:08:46,806 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39661'. Reason: failure-to-start- +2025-03-12 04:08:46,807 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39661' closed. 
+2025-03-12 04:08:46,807 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34571': TLS handshake failed with remote 'tls://127.0.0.1:48694': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:46,807 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34571': TLS handshake failed with remote 'tls://127.0.0.1:48700': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:08:46,808 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -8847,246 +9306,1410 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:20,868 - distributed.scheduler - INFO - State start -2026-04-13 07:26:20,874 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44763 -2026-04-13 07:26:20,876 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37639/status -2026-04-13 07:26:20,889 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:20,908 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37405'. Reason: failure-to-start- -2026-04-13 07:26:20,909 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37405' closed. -2026-04-13 07:26:20,909 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37663'. Reason: failure-to-start- -2026-04-13 07:26:20,909 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37663' closed. -2026-04-13 07:26:20,915 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44763': TLS handshake failed with remote 'tls://127.0.0.1:58016': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:20,915 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44763': TLS handshake failed with remote 'tls://127.0.0.1:58020': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:20,915 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... 
- ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -asyncio.exceptions.CancelledError +____________________________ test_log_remove_worker ____________________________ -The above exception was the direct cause of the following exception: +c = +s = +a = +b = -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError + @gen_cluster(client=True) + async def test_log_remove_worker(c, s, a, b): + # Computed task + x = c.submit(inc, 1, key="x", workers=a.address) + await x + ev = Event() + # Processing task + y = c.submit( + lambda ev: ev.wait(), ev, key="y", workers=a.address, allow_other_workers=True + ) + await wait_for_state("y", "processing", s) + # Scattered task + z = await c.scatter({"z": 3}, workers=a.address) + + s._broker.truncate() + + with captured_logger("distributed.scheduler", level=logging.INFO) as log: + # Successful graceful shutdown + await s.retire_workers([a.address], stimulus_id="graceful") + # Refuse to retire gracefully as there's nowhere to put x and z + await s.retire_workers([b.address], stimulus_id="graceful_abort") + await asyncio.sleep(0.2) + # Ungraceful shutdown + await s.remove_worker(b.address, stimulus_id="ungraceful") + await asyncio.sleep(0.2) + await ev.set() + + assert log.getvalue().splitlines() == [ + # Successful graceful + f"Retire worker addresses (stimulus_id='graceful') ['{a.address}']", + f"Remove worker addr: {a.address} name: {a.name} (stimulus_id='graceful')", + f"Retired worker '{a.address}' (stimulus_id='graceful')", + # Aborted graceful + f"Retire worker addresses (stimulus_id='graceful_abort') ['{b.address}']", + f"Could not retire worker '{b.address}': unique data could not be " + "moved to any other worker (stimulus_id='graceful_abort')", + # Ungraceful + f"Remove worker addr: {b.address} name: {b.name} (stimulus_id='ungraceful')", + f"Removing worker '{b.address}' caused the cluster to lose already " + "computed task(s), which will be recomputed elsewhere: {'x'} " + "(stimulus_id='ungraceful')", + f"Removing worker '{b.address}' caused the cluster to lose scattered " + "data, which can't be recovered: {'z'} (stimulus_id='ungraceful')", + "Lost all workers", + ] + + events = {topic: [ev for _, ev in evs] for topic, evs in s.get_events().items()} + for evs in events.values(): + for ev in evs: + if ev.get("action", None) == "retire-workers": + for k in ("retired", 
"could-not-retire"): + ev[k] = {addr: "snip" for addr in ev[k]} + if "stimulus_id" in ev: # Strip timestamp + ev["stimulus_id"] = ev["stimulus_id"].rsplit("-", 1)[0] + +> assert events == { + a.address: [ + { + "action": "worker-status-change", + "prev-status": "running", + "status": "closing_gracefully", + "stimulus_id": "graceful", + }, + { + "action": "remove-worker", + "lost-computed-tasks": set(), + "lost-scattered-tasks": set(), + "processing-tasks": {"y"}, + "expected": True, + "stimulus_id": "graceful", + }, + {"action": "retired", "stimulus_id": "graceful"}, + ], + b.address: [ + { + "action": "worker-status-change", + "prev-status": "running", + "status": "closing_gracefully", + "stimulus_id": "graceful_abort", + }, + {"action": "could-not-retire", "stimulus_id": "graceful_abort"}, + { + "action": "worker-status-change", + "prev-status": "closing_gracefully", + "status": "running", + "stimulus_id": "worker-status-change", + }, + { + "action": "remove-worker", + "lost-computed-tasks": {"x"}, + "lost-scattered-tasks": {"z"}, + "processing-tasks": {"y"}, + "expected": False, + "stimulus_id": "ungraceful", + }, + {"action": "closing-worker", "reason": "scheduler-remove-worker"}, + ], + "all": [ + { + "action": "remove-worker", + "lost-computed-tasks": set(), + "lost-scattered-tasks": set(), + "processing-tasks": {"y"}, + "expected": True, + "stimulus_id": "graceful", + "worker": a.address, + }, + { + "action": "retire-workers", + "stimulus_id": "graceful", + "retired": {a.address: "snip"}, + "could-not-retire": {}, + }, + { + "action": "retire-workers", + "stimulus_id": "graceful_abort", + "retired": {}, + "could-not-retire": {b.address: "snip"}, + }, + { + "action": "remove-worker", + "lost-computed-tasks": {"x"}, + "lost-scattered-tasks": {"z"}, + "processing-tasks": {"y"}, + "expected": False, + "stimulus_id": "ungraceful", + "worker": b.address, + }, + ], + "worker-get-client": [{"client": c.id, "timeout": 5, "worker": b.address}], + } +E AssertionError: assert {'tcp://127.0.0.1:41087': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:41087'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:41087': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:45237': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:45237'}], 'tcp://127.0.0.1:45237': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, 
{'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:41087'}, {'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} == {'tcp://127.0.0.1:41087': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:45237': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:41087'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:41087': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:45237': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:45237'}], 'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} +E +E Common items: +E {'all': [{'action': 'remove-worker', +E 'expected': True, +E 'lost-computed-tasks': set(), +E 'lost-scattered-tasks': set(), +E 'processing-tasks': {'y'}, +E 'stimulus_id': 'graceful', +E 'worker': 'tcp://127.0.0.1:41087'}, +E {'action': 'retire-workers', +E 'could-not-retire': {}, +E 'retired': {'tcp://127.0.0.1:41087': 'snip'}, +E 'stimulus_id': 'graceful'}, +E {'action': 'retire-workers', +E 'could-not-retire': {'tcp://127.0.0.1:45237': 'snip'}, +E 'retired': {}, +E 'stimulus_id': 'graceful_abort'}, +E {'action': 'remove-worker', +E 'expected': False, +E 'lost-computed-tasks': {'x'}, +E 'lost-scattered-tasks': {'z'}, +E 'processing-tasks': {'y'}, +E 'stimulus_id': 'ungraceful', +E 'worker': 'tcp://127.0.0.1:45237'}], +E 'tcp://127.0.0.1:41087': [{'action': 'worker-status-change', +E 'prev-status': 'running', +E 'status': 'closing_gracefully', +E 'stimulus_id': 'graceful'}, +E {'action': 'remove-worker', +E 'expected': True, +E 'lost-computed-tasks': set(), +E 'lost-scattered-tasks': set(), +E 'processing-tasks': {'y'}, +E 'stimulus_id': 'graceful'}, +E {'action': 'retired', 'stimulus_id': 'graceful'}], +E 'tcp://127.0.0.1:45237': [{'action': 'worker-status-change', +E 'prev-status': 'running', +E 'status': 'closing_gracefully', +E 'stimulus_id': 'graceful_abort'}, +E {'action': 'could-not-retire', +E 'stimulus_id': 'graceful_abort'}, +E {'action': 'worker-status-change', +E 'prev-status': 'closing_gracefully', +E 'status': 'running', +E 'stimulus_id': 
'worker-status-change'}, +E {'action': 'remove-worker', +E 'expected': False, +E 'lost-computed-tasks': {'x'}, +E 'lost-scattered-tasks': {'z'}, +E 'processing-tasks': {'y'}, +E 'stimulus_id': 'ungraceful'}, +E {'action': 'closing-worker', +E 'reason': 'scheduler-remove-worker'}]} +E Differing items: +E {'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:41087'}, {'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} != {'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} +E +E Full diff: +E { +E 'all': [ +E { +E 'action': 'remove-worker', +E 'expected': True, +E 'lost-computed-tasks': set(), +E 'lost-scattered-tasks': set(), +E 'processing-tasks': { +E 'y', +E }, +E 'stimulus_id': 'graceful', +E 'worker': 'tcp://127.0.0.1:41087', +E }, +E { +E 'action': 'retire-workers', +E 'could-not-retire': {}, +E 'retired': { +E 'tcp://127.0.0.1:41087': 'snip', +E }, +E 'stimulus_id': 'graceful', +E }, +E { +E 'action': 'retire-workers', +E 'could-not-retire': { +E 'tcp://127.0.0.1:45237': 'snip', +E }, +E 'retired': {}, +E 'stimulus_id': 'graceful_abort', +E }, +E { +E 'action': 'remove-worker', +E 'expected': False, +E 'lost-computed-tasks': { +E 'x', +E }, +E 'lost-scattered-tasks': { +E 'z', +E }, +E 'processing-tasks': { +E 'y', +E }, +E 'stimulus_id': 'ungraceful', +E 'worker': 'tcp://127.0.0.1:45237', +E }, +E ], +E 'tcp://127.0.0.1:41087': [ +E { +E 'action': 'worker-status-change', +E 'prev-status': 'running', +E 'status': 'closing_gracefully', +E 'stimulus_id': 'graceful', +E }, +E { +E 'action': 'remove-worker', +E 'expected': True, +E 'lost-computed-tasks': set(), +E 'lost-scattered-tasks': set(), +E 'processing-tasks': { +E 'y', +E }, +E 'stimulus_id': 'graceful', +E }, +E { +E 'action': 'retired', +E 'stimulus_id': 'graceful', +E }, +E ], +E 'tcp://127.0.0.1:45237': [ +E { +E 'action': 'worker-status-change', +E 'prev-status': 'running', +E 'status': 'closing_gracefully', +E 'stimulus_id': 'graceful_abort', +E }, +E { +E 'action': 'could-not-retire', +E 'stimulus_id': 'graceful_abort', +E }, +E { +E 'action': 'worker-status-change', +E 'prev-status': 'closing_gracefully', +E 'status': 'running', +E 'stimulus_id': 'worker-status-change', +E }, +E { +E 'action': 'remove-worker', +E 'expected': False, +E 'lost-computed-tasks': { +E 'x', +E }, +E 'lost-scattered-tasks': { +E 'z', +E }, +E 'processing-tasks': { +E 'y', +E }, +E 'stimulus_id': 'ungraceful', +E }, +E { +E 'action': 'closing-worker', +E 'reason': 'scheduler-remove-worker', +E }, +E ], +E 'worker-get-client': [ +E { +E 'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', +E 'timeout': 5, +E + 'worker': 'tcp://127.0.0.1:41087', +E + }, +E + { +E + 'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', +E + 'timeout': 5, +E 'worker': 'tcp://127.0.0.1:45237', +E }, +E ], +E } + +distributed/tests/test_worker.py:3016: AssertionError +----------------------------- Captured stderr call ----------------------------- +2025-03-12 04:14:40,992 - distributed.scheduler - INFO - State start +2025-03-12 04:14:41,022 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:38811 +2025-03-12 04:14:41,024 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36359/status +2025-03-12 04:14:41,035 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:14:41,068 - distributed.worker - INFO - Start 
worker at: tcp://127.0.0.1:41087 +2025-03-12 04:14:41,074 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:41087 +2025-03-12 04:14:41,077 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:14:41,083 - distributed.worker - INFO - dashboard at: 127.0.0.1:34831 +2025-03-12 04:14:41,098 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:38811 +2025-03-12 04:14:41,100 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:14:41,102 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:14:41,104 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:14:41,114 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-ymxqtzn7 +2025-03-12 04:14:41,117 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:14:41,124 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:45237 +2025-03-12 04:14:41,135 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:45237 +2025-03-12 04:14:41,137 - distributed.worker - INFO - Worker name: 1 +2025-03-12 04:14:41,139 - distributed.worker - INFO - dashboard at: 127.0.0.1:44165 +2025-03-12 04:14:41,141 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:38811 +2025-03-12 04:14:41,152 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:14:41,158 - distributed.worker - INFO - Threads: 2 +2025-03-12 04:14:41,161 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:14:41,167 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-m_8a9h4_ +2025-03-12 04:14:41,178 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:14:41,307 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:41087 name: 0 +2025-03-12 04:14:41,395 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:41087 +2025-03-12 04:14:41,414 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:49114 +2025-03-12 04:14:41,414 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:45237 name: 1 +2025-03-12 04:14:41,510 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:45237 +2025-03-12 04:14:41,512 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:49128 +2025-03-12 04:14:41,527 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:14:41,529 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:14:41,540 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:38811 +2025-03-12 04:14:41,555 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:14:41,566 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:38811 +2025-03-12 04:14:41,568 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:14:41,584 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:38811 +2025-03-12 04:14:41,584 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:38811 +2025-03-12 04:14:41,685 - distributed.scheduler - INFO - Receive client connection: Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:14:41,785 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:49138 +2025-03-12 04:14:41,849 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful') 
['tcp://127.0.0.1:41087'] +2025-03-12 04:14:41,849 - distributed.active_memory_manager - INFO - Retiring worker tcp://127.0.0.1:41087; 2 keys are being moved away. +2025-03-12 04:14:41,890 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:41087 name: 0 (stimulus_id='graceful') +2025-03-12 04:14:41,890 - distributed.scheduler - INFO - Retired worker 'tcp://127.0.0.1:41087' (stimulus_id='graceful') +2025-03-12 04:14:41,891 - distributed.scheduler - INFO - Retire worker addresses (stimulus_id='graceful_abort') ['tcp://127.0.0.1:45237'] +2025-03-12 04:14:41,891 - distributed.active_memory_manager - WARNING - Tried retiring worker tcp://127.0.0.1:45237, but 2 tasks could not be moved as there are no suitable workers to receive them. The worker will not be retired. +2025-03-12 04:14:41,891 - distributed.scheduler - WARNING - Could not retire worker 'tcp://127.0.0.1:45237': unique data could not be moved to any other worker (stimulus_id='graceful_abort') +2025-03-12 04:14:42,093 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:45237 name: 1 (stimulus_id='ungraceful') +2025-03-12 04:14:42,093 - distributed.scheduler - WARNING - Removing worker 'tcp://127.0.0.1:45237' caused the cluster to lose already computed task(s), which will be recomputed elsewhere: {'x'} (stimulus_id='ungraceful') +2025-03-12 04:14:42,093 - distributed.scheduler - ERROR - Removing worker 'tcp://127.0.0.1:45237' caused the cluster to lose scattered data, which can't be recovered: {'z'} (stimulus_id='ungraceful') +2025-03-12 04:14:42,093 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:14:42,094 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:45237. Reason: scheduler-remove-worker +2025-03-12 04:14:42,102 - distributed.worker.state_machine - WARNING - Async instruction for > ended with CancelledError +2025-03-12 04:14:42,118 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:14:42,134 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:49128; closing. +2025-03-12 04:14:42,312 - distributed.scheduler - INFO - Remove client Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:14:42,331 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:49138; closing. +2025-03-12 04:14:42,331 - distributed.scheduler - INFO - Remove client Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:14:42,350 - distributed.core - INFO - Connection to tcp://127.0.0.1:38811 has been closed. +2025-03-12 04:14:42,350 - distributed.scheduler - INFO - Close client connection: Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:14:42,378 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:41087. Reason: worker-close +2025-03-12 04:14:42,381 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:14:42,401 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:49114; closing. +2025-03-12 04:14:42,401 - distributed.core - INFO - Connection to tcp://127.0.0.1:38811 has been closed. +2025-03-12 04:14:42,418 - distributed.scheduler - INFO - Closing scheduler. 
Reason: unknown +2025-03-12 04:14:42,421 - distributed.scheduler - INFO - Scheduler closing all comms +__________________________ test_fail_to_pickle_spill ___________________________ + +fut = , timeout = 5 + + async def wait_for(fut: Awaitable[T], timeout: float) -> T: + async with asyncio.timeout(timeout): +> return await fut + +distributed/utils.py:1914: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = Scheduler local=tcp://127.0.0.1:46738 remote=tcp://127.0.0.1:44581> +deserializers = None + + async def read(self, deserializers=None): + stream = self.stream + if stream is None: + raise CommClosedError() + + fmt = "Q" + fmt_size = struct.calcsize(fmt) + + try: + # Don't store multiple numpy or parquet buffers into the same buffer, or + # none will be released until all are released. +> frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size) +E asyncio.exceptions.CancelledError + +distributed/comm/tcp.py:225: CancelledError The above exception was the direct cause of the following exception: -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:26:21,920 - distributed.scheduler - INFO - State start -2026-04-13 07:26:21,934 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41421 -2026-04-13 07:26:21,944 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42563/status -2026-04-13 07:26:21,946 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:21,996 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32837'. Reason: failure-to-start- -2026-04-13 07:26:21,996 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32837' closed. -2026-04-13 07:26:21,996 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38811'. Reason: failure-to-start- -2026-04-13 07:26:21,996 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38811' closed. 
-2026-04-13 07:26:21,997 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41421': TLS handshake failed with remote 'tls://127.0.0.1:54578': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:21,997 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41421': TLS handshake failed with remote 'tls://127.0.0.1:54580': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:21,997 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for +args = (), kwds = {} + + @wraps(func) + def inner(*args, **kwds): + with self._recreate_cm(): +> return func(*args, **kwds) + +/usr/lib/python3.13/contextlib.py:85: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +/usr/lib/python3.13/contextlib.py:85: in inner + return func(*args, **kwds) +distributed/utils_test.py:1090: in test_func + return _run_and_close_tornado(async_fn_outer) +distributed/utils_test.py:380: in _run_and_close_tornado + return asyncio_run(inner_fn(), loop_factory=get_loop_factory()) +/usr/lib/python3.13/asyncio/runners.py:195: in run + return runner.run(main) +/usr/lib/python3.13/asyncio/runners.py:118: in run + return self._loop.run_until_complete(task) +/usr/lib/python3.13/asyncio/base_events.py:725: in run_until_complete + return future.result() +distributed/utils_test.py:377: in inner_fn + return await async_fn(*args, **kwargs) +distributed/utils_test.py:1087: in async_fn_outer + return await utils_wait_for(async_fn(), timeout=timeout * 2) +distributed/utils.py:1914: in wait_for return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 
-asyncio.exceptions.CancelledError +distributed/utils_test.py:1008: in async_fn + _client_factory(s) as c, +/usr/lib/python3.13/contextlib.py:214: in __aenter__ + return await anext(self.gen) +distributed/utils_test.py:957: in _client_factory + async with Client( +distributed/client.py:1700: in __aenter__ + await self +distributed/client.py:1512: in _start + await self._ensure_connected(timeout=timeout) +distributed/client.py:1604: in _ensure_connected + msg = await wait_for(comm.read(), timeout) +distributed/utils.py:1913: in wait_for + async with asyncio.timeout(timeout): +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +self = +exc_type = +exc_val = CancelledError(), exc_tb = + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + assert self._state in (_State.ENTERED, _State.EXPIRING) + + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._timeout_handler = None + + if self._state is _State.EXPIRING: + self._state = _State.EXPIRED + + if self._task.uncancel() <= self._cancelling and exc_type is not None: + # Since there are no new cancel requests, we're + # handling this. + if issubclass(exc_type, exceptions.CancelledError): +> raise TimeoutError from exc_val +E TimeoutError + +/usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError +----------------------------- Captured stderr call ----------------------------- +2025-03-12 04:15:57,625 - distributed.scheduler - INFO - State start +2025-03-12 04:15:57,647 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:44581 +2025-03-12 04:15:57,650 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42159/status +2025-03-12 04:15:57,652 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:15:57,671 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:35963 +2025-03-12 04:15:57,682 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:35963 +2025-03-12 04:15:57,684 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:15:57,695 - distributed.worker - INFO - dashboard at: 127.0.0.1:39163 +2025-03-12 04:15:57,706 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:44581 +2025-03-12 04:15:57,708 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:15:57,721 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:15:57,724 - distributed.worker - INFO - Memory: 0.98 kiB +2025-03-12 04:15:57,734 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-9_xle41h +2025-03-12 04:15:57,737 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:15:57,829 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:35963 name: 0 +2025-03-12 04:15:57,915 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:35963 +2025-03-12 04:15:57,917 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:44404 +2025-03-12 04:15:57,918 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:15:57,921 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:44581 +2025-03-12 04:15:57,940 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:15:57,959 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:44581 +2025-03-12 04:15:58,067 - distributed.scheduler - 
INFO - Receive client connection: Client-5a386f85-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:15:58,153 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:46738 +2025-03-12 04:16:04,539 - distributed.worker.memory - WARNING - gc.collect() took 1.699s. This is usually a sign that some tasks handle too many Python objects at the same time. Rechunking the work into smaller tasks might help. +2025-03-12 04:16:04,541 - distributed.worker.memory - WARNING - Worker is at 96101171% memory usage. Pausing worker. Process memory: 916.49 MiB -- Worker memory limit: 0.98 kiB +2025-03-12 04:16:04,552 - distributed.worker.memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 916.49 MiB -- Worker memory limit: 0.98 kiB +2025-03-12 04:16:04,564 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 6.90s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:16:04,565 - distributed.core - INFO - Event loop was unresponsive in Worker for 6.61s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:16:04,583 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:35963. Reason: worker-close +2025-03-12 04:16:04,594 - distributed.core - INFO - Connection to tcp://127.0.0.1:46738 has been closed. +2025-03-12 04:16:04,594 - distributed.scheduler - INFO - Remove client Client-5a386f85-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:16:04,597 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:16:04,609 - distributed.scheduler - INFO - Close client connection: Client-5a386f85-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:16:04,620 - distributed.worker - ERROR - Failed to communicate with scheduler during heartbeat. +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 225, in read + frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1269, in heartbeat + response = await retry_operation( + ^^^^^^^^^^^^^^^^^^^^^^ + ...<14 lines>... + ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 441, in retry_operation + return await retry( + ^^^^^^^^^^^^ + ...<5 lines>... 
+ ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 420, in retry + return await coro() + ^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1259, in send_recv_from_rpc + return await send_recv(comm=comm, op=key, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1018, in send_recv + response = await comm.read(deserializers=deserializers) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc}") from exc +distributed.comm.core.CommClosedError: in : Stream is closed +2025-03-12 04:16:04,641 - distributed.core - INFO - Connection to tcp://127.0.0.1:44581 has been closed. +2025-03-12 04:16:04,674 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:44404; closing. +2025-03-12 04:16:04,674 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:35963 name: 0 (stimulus_id='handle-worker-cleanup-1741702564.6744888') +2025-03-12 04:16:04,677 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:16:04,694 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown +2025-03-12 04:16:04,697 - distributed.scheduler - INFO - Scheduler closing all comms +__________________________ test_pause_while_spilling ___________________________ + +c = +s = +a = + + @gen_cluster( + nthreads=[("", 1)], + client=True, + worker_kwargs={"memory_limit": "2 GiB"}, + # ^ must be smaller than system memory limit, otherwise that will take precedence + config={ + "distributed.worker.memory.target": False, + "distributed.worker.memory.spill": 0.5, + "distributed.worker.memory.pause": 0.8, + "distributed.worker.memory.monitor-interval": "10ms", + }, + ) + async def test_pause_while_spilling(c, s, a): + N_PAUSE = 3 + N_TOTAL = 5 + + if a.memory_manager.memory_limit < parse_bytes("2 GiB"): + pytest.fail( + f"Set 2 GiB memory limit, got {format_bytes(a.memory_manager.memory_limit)}." 
+ ) + + def get_process_memory(): + if len(a.data) < N_PAUSE: + # Don't trigger spilling until after some tasks have completed + return 0 + elif a.data.fast and not a.data.slow: + # Trigger spilling + return parse_bytes("1.6 GiB") + else: + # Trigger pause, but only after we started spilling + return parse_bytes("1.9 GiB") + + a.monitor.get_process_memory = get_process_memory + + class SlowSpill: + def __init__(self): + # We need to record the worker while we are inside a task; can't do it in + # __reduce__ or it will pick up an arbitrary one among all running workers + self.worker = distributed.get_worker() + while len(self.worker.data.fast) >= N_PAUSE: + sleep(0.01) + + def __reduce__(self): + paused = self.worker.status == Status.paused + if not paused: + sleep(0.1) + return bool, (paused,) + + futs = [c.submit(SlowSpill, pure=False) for _ in range(N_TOTAL)] + +> await async_poll_for(lambda: len(a.data.slow) >= N_PAUSE, timeout=5, period=0) + +distributed/tests/test_worker_memory.py:982: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +predicate = . at 0xffff2a2565c0> +timeout = 5, fail_func = None, period = 0 + + async def async_poll_for(predicate, timeout, fail_func=None, period=0.05): + deadline = time() + timeout + while not predicate(): + await asyncio.sleep(period) + if time() > deadline: + if fail_func is not None: + fail_func() +> pytest.fail(f"condition not reached until {timeout} seconds") +E Failed: condition not reached until 5 seconds + +distributed/utils_test.py:1232: Failed +----------------------------- Captured stderr call ----------------------------- +2025-03-12 04:16:46,648 - distributed.scheduler - INFO - State start +2025-03-12 04:16:46,677 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:32775 +2025-03-12 04:16:46,692 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34675/status +2025-03-12 04:16:46,711 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:16:46,722 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:36609 +2025-03-12 04:16:46,725 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:36609 +2025-03-12 04:16:46,745 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:16:46,752 - distributed.worker - INFO - dashboard at: 127.0.0.1:42931 +2025-03-12 04:16:46,759 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:32775 +2025-03-12 04:16:46,769 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:16:46,772 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:16:46,787 - distributed.worker - INFO - Memory: 2.00 GiB +2025-03-12 04:16:46,794 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-gydocaa7 +2025-03-12 04:16:46,796 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:16:46,918 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:36609 name: 0 +2025-03-12 04:16:47,000 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:36609 +2025-03-12 04:16:47,015 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:45820 +2025-03-12 04:16:47,016 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:16:47,031 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:32775 +2025-03-12 04:16:47,038 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:16:47,041 - distributed.core - 
INFO - Starting established connection to tcp://127.0.0.1:32775 +2025-03-12 04:16:47,179 - distributed.scheduler - INFO - Receive client connection: Client-777b15aa-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:16:47,260 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:45826 +2025-03-12 04:16:53,987 - distributed.worker.memory - WARNING - gc.collect() took 1.720s. This is usually a sign that some tasks handle too many Python objects at the same time. Rechunking the work into smaller tasks might help. +2025-03-12 04:16:53,994 - distributed.worker.memory - WARNING - Worker is at 94% memory usage. Pausing worker. Process memory: 1.90 GiB -- Worker memory limit: 2.00 GiB +2025-03-12 04:16:54,012 - distributed.core - INFO - Event loop was unresponsive in Worker for 6.97s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:16:54,013 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 6.75s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:16:54,068 - distributed.scheduler - INFO - Remove client Client-777b15aa-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:16:54,086 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:45826; closing. +2025-03-12 04:16:54,087 - distributed.scheduler - INFO - Remove client Client-777b15aa-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:16:54,104 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:36609. Reason: worker-close +2025-03-12 04:16:54,119 - distributed.scheduler - INFO - Close client connection: Client-777b15aa-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:16:54,138 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:16:54,159 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:45820; closing. +2025-03-12 04:16:54,159 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:36609 name: 0 (stimulus_id='handle-worker-cleanup-1741702614.1598766') +2025-03-12 04:16:54,174 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:16:54,177 - distributed.core - INFO - Connection to tcp://127.0.0.1:32775 has been closed. +2025-03-12 04:16:54,198 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown +2025-03-12 04:16:54,201 - distributed.scheduler - INFO - Scheduler closing all comms +____________________________ test_pause_while_idle _____________________________ + +s = +a = +b = + + @gen_cluster(config={"distributed.worker.memory.monitor-interval": "10ms"}) + async def test_pause_while_idle(s, a, b): + sa = s.workers[a.address] + assert a.address in s.idle + assert sa in s.running + + a.monitor.get_process_memory = lambda: 2**40 +> await async_poll_for(lambda: sa.status == Status.paused, timeout=5) + +distributed/tests/test_worker_memory.py:1105: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + +predicate = . 
at 0xfffeeaf2ba60> +timeout = 5, fail_func = None, period = 0.05 + + async def async_poll_for(predicate, timeout, fail_func=None, period=0.05): + deadline = time() + timeout + while not predicate(): + await asyncio.sleep(period) + if time() > deadline: + if fail_func is not None: + fail_func() +> pytest.fail(f"condition not reached until {timeout} seconds") +E Failed: condition not reached until 5 seconds + +distributed/utils_test.py:1232: Failed +----------------------------- Captured stderr call ----------------------------- +2025-03-12 04:17:16,712 - distributed.scheduler - INFO - State start +2025-03-12 04:17:16,734 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:34919 +2025-03-12 04:17:16,736 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39473/status +2025-03-12 04:17:16,751 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:17:16,798 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:35175 +2025-03-12 04:17:16,801 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:35175 +2025-03-12 04:17:16,811 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:17:16,822 - distributed.worker - INFO - dashboard at: 127.0.0.1:34779 +2025-03-12 04:17:16,825 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:34919 +2025-03-12 04:17:16,835 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:16,858 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:17:16,860 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:17:16,871 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-r2n5gmn1 +2025-03-12 04:17:16,873 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:16,885 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:42511 +2025-03-12 04:17:16,895 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:42511 +2025-03-12 04:17:16,914 - distributed.worker - INFO - Worker name: 1 +2025-03-12 04:17:16,916 - distributed.worker - INFO - dashboard at: 127.0.0.1:36797 +2025-03-12 04:17:16,927 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:34919 +2025-03-12 04:17:16,941 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:16,944 - distributed.worker - INFO - Threads: 2 +2025-03-12 04:17:16,955 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:17:16,957 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-yppkn1kt +2025-03-12 04:17:16,976 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:17,198 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:35175 name: 0 +2025-03-12 04:17:17,299 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:35175 +2025-03-12 04:17:17,314 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:55372 +2025-03-12 04:17:17,314 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:42511 name: 1 +2025-03-12 04:17:17,419 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:42511 +2025-03-12 04:17:17,430 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:55388 +2025-03-12 04:17:17,431 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:17:17,450 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:17:17,453 
- distributed.worker - INFO - Registered to: tcp://127.0.0.1:34919 +2025-03-12 04:17:17,464 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:17,479 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:34919 +2025-03-12 04:17:17,481 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:17,492 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:34919 +2025-03-12 04:17:17,493 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:34919 +2025-03-12 04:17:24,039 - distributed.worker.memory - WARNING - gc.collect() took 1.713s. This is usually a sign that some tasks handle too many Python objects at the same time. Rechunking the work into smaller tasks might help. +2025-03-12 04:17:24,054 - distributed.worker.memory - WARNING - Worker is at 1742% memory usage. Pausing worker. Process memory: 1.00 TiB -- Worker memory limit: 58.76 GiB +2025-03-12 04:17:24,070 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 6.64s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:17:24,071 - distributed.core - INFO - Event loop was unresponsive in Worker for 6.59s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:17:24,071 - distributed.core - INFO - Event loop was unresponsive in Worker for 6.58s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:17:24,082 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:35175. Reason: worker-close +2025-03-12 04:17:24,084 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:42511. Reason: worker-close +2025-03-12 04:17:24,097 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:17:24,111 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:17:24,128 - distributed.worker - ERROR - Failed to communicate with scheduler during heartbeat. +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 225, in read + frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. 
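[Annotation, not part of the build log] The test_pause_while_* failures in this run all time out inside async_poll_for, the polling helper from distributed/utils_test.py whose source pytest echoes with each failure: it re-evaluates a predicate every `period` seconds and calls pytest.fail once `timeout` seconds have elapsed. A minimal standalone sketch of that pattern follows; the names and the plain TimeoutError (instead of pytest.fail) are illustrative assumptions, not code from this package.

    import asyncio
    from time import time

    async def poll_for(predicate, timeout, period=0.05):
        # Re-check the predicate until it holds or the deadline passes,
        # mirroring the async_poll_for helper shown in the failure output.
        deadline = time() + timeout
        while not predicate():
            await asyncio.sleep(period)
            if time() > deadline:
                raise TimeoutError(f"condition not reached within {timeout} seconds")

    async def main():
        flag = {"done": False}
        # Flip the flag shortly after start so the poll succeeds.
        asyncio.get_running_loop().call_later(0.2, flag.update, {"done": True})
        await poll_for(lambda: flag["done"], timeout=5)
        print("condition reached")

    asyncio.run(main())

In the failing runs the polled conditions (e.g. sa.status == Status.paused) never became true within the 5 s window; the captured stderr suggests a plausible cause, with gc.collect() taking ~1.7 s and the event loop reported unresponsive for ~6-7 s, i.e. longer than the poll timeout itself.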
-2026-04-13 07:26:23,004 - distributed.scheduler - INFO - State start -2026-04-13 07:26:23,018 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43969 -2026-04-13 07:26:23,020 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45311/status -2026-04-13 07:26:23,022 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:23,053 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37603'. Reason: failure-to-start- -2026-04-13 07:26:23,053 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37603' closed. -2026-04-13 07:26:23,053 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39551'. Reason: failure-to-start- -2026-04-13 07:26:23,053 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39551' closed. -2026-04-13 07:26:23,054 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43969': TLS handshake failed with remote 'tls://127.0.0.1:56536': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:23,054 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43969': TLS handshake failed with remote 'tls://127.0.0.1:56538': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:26:23,054 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1269, in heartbeat + response = await retry_operation( + ^^^^^^^^^^^^^^^^^^^^^^ + ...<14 lines>... + ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 441, in retry_operation + return await retry( + ^^^^^^^^^^^^ + ...<5 lines>... + ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 420, in retry + return await coro() + ^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1259, in send_recv_from_rpc + return await send_recv(comm=comm, op=key, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1018, in send_recv + response = await comm.read(deserializers=deserializers) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc}") from exc +distributed.comm.core.CommClosedError: in : Stream is closed +2025-03-12 04:17:24,141 - distributed.worker - ERROR - Failed to communicate with scheduler during heartbeat. 
Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -asyncio.exceptions.CancelledError + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 225, in read + frames_nosplit_nbytes_bin = await stream.read_bytes(fmt_size) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/worker.py", line 1269, in heartbeat + response = await retry_operation( + ^^^^^^^^^^^^^^^^^^^^^^ + ...<14 lines>... + ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 441, in retry_operation + return await retry( + ^^^^^^^^^^^^ + ...<5 lines>... 
+ ) + ^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_comm.py", line 420, in retry + return await coro() + ^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1259, in send_recv_from_rpc + return await send_recv(comm=comm, op=key, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1018, in send_recv + response = await comm.read(deserializers=deserializers) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 236, in read + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc}") from exc +distributed.comm.core.CommClosedError: in : Stream is closed +2025-03-12 04:17:24,170 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:55372; closing. +2025-03-12 04:17:24,171 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:35175 name: 0 (stimulus_id='handle-worker-cleanup-1741702644.1710517') +2025-03-12 04:17:24,181 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:55388; closing. +2025-03-12 04:17:24,182 - distributed.core - INFO - Connection to tcp://127.0.0.1:34919 has been closed. +2025-03-12 04:17:24,202 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:42511 name: 1 (stimulus_id='handle-worker-cleanup-1741702644.2022684') +2025-03-12 04:17:24,204 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:17:24,211 - distributed.core - INFO - Connection to tcp://127.0.0.1:34919 has been closed. +2025-03-12 04:17:24,234 - distributed.batched - INFO - Batched Comm Closed +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send + nbytes = yield coro + ^^^^^^^^^^ + File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run + value = future.result() + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 262, in write + raise CommClosedError() +distributed.comm.core.CommClosedError +2025-03-12 04:17:24,242 - distributed.scheduler - INFO - Closing scheduler. 
Reason: unknown +2025-03-12 04:17:24,245 - distributed.scheduler - INFO - Scheduler closing all comms +__________________________ test_pause_while_saturated __________________________ + +c = +s = +a = +b = + + @gen_cluster(client=True, config={"distributed.worker.memory.monitor-interval": "10ms"}) + async def test_pause_while_saturated(c, s, a, b): + sa = s.workers[a.address] + ev = Event() + futs = c.map(lambda i, ev: ev.wait(), range(3), ev=ev, workers=[a.address]) + await async_poll_for(lambda: len(a.state.tasks) == 3, timeout=5) + assert sa in s.saturated + assert sa in s.running + + a.monitor.get_process_memory = lambda: 2**40 +> await async_poll_for(lambda: sa.status == Status.paused, timeout=5) -The above exception was the direct cause of the following exception: +distributed/tests/test_worker_memory.py:1125: +_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. -_____________________________ test_retire_workers ______________________________ +predicate = . 
at 0xffff0a779d00> +timeout = 5, fail_func = None, period = 0.05 -fut = , timeout = 0 + async def async_poll_for(predicate, timeout, fail_func=None, period=0.05): + deadline = time() + timeout + while not predicate(): + await asyncio.sleep(period) + if time() > deadline: + if fail_func is not None: + fail_func() +> pytest.fail(f"condition not reached until {timeout} seconds") +E Failed: condition not reached until 5 seconds + +distributed/utils_test.py:1232: Failed +----------------------------- Captured stderr call ----------------------------- +2025-03-12 04:17:24,468 - distributed.scheduler - INFO - State start +2025-03-12 04:17:24,490 - distributed.scheduler - INFO - Scheduler at: tcp://127.0.0.1:33871 +2025-03-12 04:17:24,493 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34787/status +2025-03-12 04:17:24,507 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:17:24,537 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:41959 +2025-03-12 04:17:24,551 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:41959 +2025-03-12 04:17:24,562 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:17:24,565 - distributed.worker - INFO - dashboard at: 127.0.0.1:41051 +2025-03-12 04:17:24,579 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:33871 +2025-03-12 04:17:24,590 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:24,592 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:17:24,607 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:17:24,618 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-33kramf_ +2025-03-12 04:17:24,620 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:24,636 - distributed.worker - INFO - Start worker at: tcp://127.0.0.1:34199 +2025-03-12 04:17:24,646 - distributed.worker - INFO - Listening to: tcp://127.0.0.1:34199 +2025-03-12 04:17:24,649 - distributed.worker - INFO - Worker name: 1 +2025-03-12 04:17:24,664 - distributed.worker - INFO - dashboard at: 127.0.0.1:33347 +2025-03-12 04:17:24,674 - distributed.worker - INFO - Waiting to connect to: tcp://127.0.0.1:33871 +2025-03-12 04:17:24,677 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:24,691 - distributed.worker - INFO - Threads: 2 +2025-03-12 04:17:24,702 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:17:24,705 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-f2qbtqq6 +2025-03-12 04:17:24,719 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:24,931 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:41959 name: 0 +2025-03-12 04:17:25,028 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:41959 +2025-03-12 04:17:25,043 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43420 +2025-03-12 04:17:25,043 - distributed.scheduler - INFO - Register worker addr: tcp://127.0.0.1:34199 name: 1 +2025-03-12 04:17:25,141 - distributed.scheduler - INFO - Starting worker compute stream, tcp://127.0.0.1:34199 +2025-03-12 04:17:25,155 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43436 +2025-03-12 04:17:25,170 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:17:25,173 - distributed.worker - INFO - Starting Worker plugin shuffle 
+2025-03-12 04:17:25,184 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:33871 +2025-03-12 04:17:25,199 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:25,214 - distributed.worker - INFO - Registered to: tcp://127.0.0.1:33871 +2025-03-12 04:17:25,216 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:17:25,228 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:33871 +2025-03-12 04:17:25,228 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:33871 +2025-03-12 04:17:25,339 - distributed.scheduler - INFO - Receive client connection: Client-8e3ca257-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:17:25,440 - distributed.core - INFO - Starting established connection to tcp://127.0.0.1:43446 +2025-03-12 04:17:30,976 - distributed.worker.memory - WARNING - gc.collect() took 1.693s. This is usually a sign that some tasks handle too many Python objects at the same time. Rechunking the work into smaller tasks might help. +2025-03-12 04:17:30,986 - distributed.worker.memory - WARNING - Worker is at 1742% memory usage. Pausing worker. Process memory: 1.00 TiB -- Worker memory limit: 58.76 GiB +2025-03-12 04:17:30,990 - distributed.core - INFO - Event loop was unresponsive in Scheduler for 5.82s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:17:30,991 - distributed.core - INFO - Event loop was unresponsive in Worker for 5.78s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:17:30,992 - distributed.core - INFO - Event loop was unresponsive in Worker for 5.76s. This is often caused by long-running GIL-holding functions or moving large chunks of data. This can cause timeouts and instability. +2025-03-12 04:17:31,016 - distributed.scheduler - INFO - Remove client Client-8e3ca257-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:17:31,023 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43446; closing. +2025-03-12 04:17:31,023 - distributed.scheduler - INFO - Remove client Client-8e3ca257-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:17:31,035 - distributed.scheduler - INFO - Close client connection: Client-8e3ca257-fe83-11ef-9228-ad3bd1b43ef1 +2025-03-12 04:17:31,039 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:41959. Reason: worker-close +2025-03-12 04:17:31,050 - distributed.worker - INFO - Stopping worker at tcp://127.0.0.1:34199. Reason: worker-close +2025-03-12 04:17:31,058 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:17:31,081 - distributed.worker.state_machine - WARNING - Async instruction for > ended with CancelledError +2025-03-12 04:17:31,085 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:17:31,102 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43436; closing. +2025-03-12 04:17:31,103 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:34199 name: 1 (stimulus_id='handle-worker-cleanup-1741702651.1029983') +2025-03-12 04:17:31,105 - distributed.core - INFO - Connection to tcp://127.0.0.1:33871 has been closed. +2025-03-12 04:17:31,110 - distributed.core - INFO - Received 'close-stream' from tcp://127.0.0.1:43420; closing. 
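[Annotation, not part of the build log] The "Pausing worker" warnings above come from the worker memory monitor, whose polling these tests speed up by setting distributed.worker.memory.monitor-interval to 10ms (visible in the @gen_cluster config). For orientation, a hedged sketch of the related dask configuration knobs follows; the key names are from distributed's config schema, while the numeric values are only illustrative and not taken from this build.

    import dask

    # Illustrative values only; deployments tune these fractions of the
    # worker's memory limit (e.g. the 2 GiB / 58.76 GiB limits shown above).
    dask.config.set({
        "distributed.worker.memory.target": 0.60,      # start spilling managed data to disk
        "distributed.worker.memory.spill": 0.70,       # spill based on process memory
        "distributed.worker.memory.pause": 0.80,       # pause task execution ("Pausing worker")
        "distributed.worker.memory.terminate": 0.95,   # nanny restarts the worker
        "distributed.worker.memory.monitor-interval": "10ms",
    })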
+2025-03-12 04:17:31,110 - distributed.scheduler - INFO - Remove worker addr: tcp://127.0.0.1:41959 name: 0 (stimulus_id='handle-worker-cleanup-1741702651.1107385') +2025-03-12 04:17:31,113 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:17:31,124 - distributed.core - INFO - Connection to tcp://127.0.0.1:33871 has been closed. +2025-03-12 04:17:31,139 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown +2025-03-12 04:17:31,142 - distributed.scheduler - INFO - Scheduler closing all comms +2025-03-12 04:17:31,145 - distributed.event - ERROR - +Traceback (most recent call last): + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 809, in wrapper + return await func(*args, **kwargs) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/event.py", line 77, in event_wait + await future + File "/usr/lib/python3.13/asyncio/locks.py", line 213, in wait + await fut +asyncio.exceptions.CancelledError +============================= slowest 20 durations ============================= +60.01s call distributed/tests/test_tls_functional.py::test_nanny +43.21s call distributed/tests/test_gc.py::test_gc_diagnosis_cpu_time +25.03s call distributed/tests/test_client.py::test_release_persisted_collection +21.65s call distributed/tests/test_nanny.py::test_num_fds +16.46s call distributed/tests/test_stress.py::test_cancel_stress +14.54s call distributed/shuffle/tests/test_rechunk.py::test_rechunk_method +14.47s call distributed/diagnostics/tests/test_progress.py::test_group_timing +13.82s call distributed/diagnostics/tests/test_progress.py::test_AllProgress +12.29s call distributed/tests/test_nanny.py::test_nanny_restart +12.27s call distributed/tests/test_scheduler.py::test_balance_many_workers_2 +11.96s call distributed/tests/test_stress.py::test_cancel_stress_sync +11.70s call distributed/cli/tests/test_tls_cli.py::test_separate_key_cert +11.44s call distributed/tests/test_gc.py::test_gc_diagnosis_rss_win +11.38s call distributed/deploy/tests/test_subprocess.py::test_scale_up_and_down +11.36s call distributed/cli/tests/test_tls_cli.py::test_sni +11.29s call distributed/cli/tests/test_dask_scheduler.py::test_interface +11.25s call distributed/cli/tests/test_tls_cli.py::test_use_config_file +11.18s call distributed/tests/test_failed_workers.py::test_multiple_clients_restart +11.11s call distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x5-chunks5] +10.95s call distributed/tests/test_failed_workers.py::test_forgotten_futures_dont_clean_up_new_futures +=========================== short test summary info ============================ +SKIPPED [1] distributed/cli/tests/test_dask_ssh.py:9: could not import 'paramiko': No module named 'paramiko' +SKIPPED [1] distributed/comm/tests/test_ucx.py:15: could not import 'ucp': No module named 'ucp' +SKIPPED [1] distributed/comm/tests/test_ucx_config.py:23: could not import 'ucp': No module named 'ucp' +SKIPPED [1] distributed/dashboard/tests/test_components.py:5: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/dashboard/tests/test_scheduler_bokeh.py:11: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/dashboard/tests/test_worker_bokeh.py:10: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/deploy/tests/test_old_ssh.py:7: could not import 'paramiko': No module 
named 'paramiko' +SKIPPED [1] distributed/deploy/tests/test_ssh.py:5: could not import 'asyncssh': No module named 'asyncssh' +SKIPPED [1] distributed/diagnostics/tests/test_cudf_diagnostics.py:20: could not import 'cudf': No module named 'cudf' +SKIPPED [1] distributed/diagnostics/tests/test_memray.py:5: could not import 'memray': No module named 'memray' +SKIPPED [1] distributed/diagnostics/tests/test_nvml.py:11: could not import 'pynvml': No module named 'pynvml' +SKIPPED [1] distributed/diagnostics/tests/test_progress_stream.py:5: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/diagnostics/tests/test_progress_widgets.py:14: could not import 'ipywidgets': No module named 'ipywidgets' +SKIPPED [1] distributed/diagnostics/tests/test_rmm_diagnostics.py:14: could not import 'dask_cuda': No module named 'dask_cuda' +SKIPPED [1] distributed/protocol/tests/test_arrow.py:5: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/protocol/tests/test_cupy.py:11: could not import 'cupy': No module named 'cupy' +SKIPPED [1] distributed/protocol/tests/test_h5py.py:8: could not import 'h5py': No module named 'h5py' +SKIPPED [1] distributed/protocol/tests/test_keras.py:5: could not import 'keras': No module named 'keras' +SKIPPED [1] distributed/protocol/tests/test_netcdf4.py:5: could not import 'netCDF4': No module named 'netCDF4' +SKIPPED [1] distributed/protocol/tests/test_numba.py:11: could not import 'numba.cuda': No module named 'numba' +SKIPPED [1] distributed/protocol/tests/test_rmm.py:10: could not import 'numba.cuda': No module named 'numba' +SKIPPED [1] distributed/protocol/tests/test_scipy.py:8: could not import 'scipy': No module named 'scipy' +SKIPPED [1] distributed/protocol/tests/test_sparse.py:6: could not import 'sparse': No module named 'sparse' +SKIPPED [1] distributed/protocol/tests/test_torch.py:8: could not import 'torch': No module named 'torch' +SKIPPED [1] distributed/shuffle/tests/test_graph.py:9: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/shuffle/tests/test_merge.py:71: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/shuffle/tests/test_merge_column_and_index.py:110: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/shuffle/tests/test_metrics.py:71: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/shuffle/tests/test_shuffle.py:195: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/tests/test_parse_stdout.py:14: could not import 'parse_stdout': No module named 'parse_stdout' +SKIPPED [1] distributed/cli/tests/test_dask_scheduler.py:552: need --runslow option to run +SKIPPED [1] distributed/cli/tests/test_dask_scheduler.py:564: need --runslow option to run +SKIPPED [2] distributed/cli/tests/test_dask_scheduler.py:662: need --runslow option to run +SKIPPED [2] distributed/cli/tests/test_dask_worker.py:616: need --runslow option to run +SKIPPED [1] distributed/deploy/tests/test_subprocess.py:77: need --runslow option to run +SKIPPED [1] distributed/deploy/tests/test_subprocess.py:92: Windows-specific error testing (distributed#7434) +SKIPPED [1] distributed/diagnostics/tests/test_eventstream.py:16: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/diagnostics/tests/test_install_plugin.py:143: need --runslow option to run +SKIPPED [1] distributed/diagnostics/tests/test_install_plugin.py:151: need --runslow option to run +SKIPPED [1] 
distributed/diagnostics/tests/test_install_plugin.py:158: need --runslow option to run +SKIPPED [2] distributed/diagnostics/tests/test_memory_sampler.py:103: could not import 'matplotlib': No module named 'matplotlib' +SKIPPED [1] distributed/diagnostics/tests/test_task_stream.py:140: could not import 'bokeh.models': No module named 'bokeh' +SKIPPED [1] distributed/diagnostics/tests/test_task_stream.py:151: could not import 'bokeh.models': No module named 'bokeh' +SKIPPED [1] distributed/http/scheduler/tests/test_missing_bokeh.py:35: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/http/scheduler/tests/test_missing_bokeh.py:59: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:87: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:535: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:648: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:689: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:709: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:732: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:754: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:775: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/worker/tests/test_worker_http.py:150: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/worker/tests/test_worker_http.py:163: could not import 'aiohttp': No module named 'aiohttp' +SKIPPED [1] distributed/http/worker/tests/test_worker_http.py:234: Fails on 32-bit, seems to be large memory request +SKIPPED [4] distributed/protocol/tests/test_collection_cuda.py:12: could not import 'cupy': No module named 'cupy' +SKIPPED [4] distributed/protocol/tests/test_collection_cuda.py:44: could not import 'cudf': No module named 'cudf' +SKIPPED [1] distributed/protocol/tests/test_compression.py:125: could not import 'lz4': No module named 'lz4' +SKIPPED [2] distributed/protocol/tests/test_compression.py:132: could not import 'lz4': No module named 'lz4' +SKIPPED [2] distributed/protocol/tests/test_compression.py:132: could not import 'snappy': No module named 'snappy' +SKIPPED [2] distributed/protocol/tests/test_compression.py:132: could not import 'zstandard': No module named 'zstandard' +SKIPPED [1] distributed/protocol/tests/test_compression.py:143: could not import 'lz4': No module named 'lz4' +SKIPPED [1] distributed/protocol/tests/test_compression.py:143: could not import 'snappy': No module named 'snappy' +SKIPPED [1] distributed/protocol/tests/test_compression.py:143: could not import 'zstandard': No module named 'zstandard' +SKIPPED [1] distributed/protocol/tests/test_compression.py:151: could not import 'lz4': No module named 'lz4' +SKIPPED [1] distributed/protocol/tests/test_compression.py:151: could not import 'snappy': No module named 'snappy' +SKIPPED [1] distributed/protocol/tests/test_compression.py:151: could not import 'zstandard': No module named 'zstandard' +SKIPPED [10] distributed/protocol/tests/test_compression.py:168: 
need --runslow option to run +SKIPPED [5] distributed/protocol/tests/test_compression.py:188: need --runslow option to run +SKIPPED [1] distributed/protocol/tests/test_numpy.py:150: could not import 'numpy.core.test_rational': No module named 'numpy.core.test_rational' +SKIPPED [1] distributed/protocol/tests/test_numpy.py:180: need --runslow option to run +SKIPPED [1] distributed/protocol/tests/test_pickle.py:275: need --runslow option to run +SKIPPED [3] distributed/protocol/tests/test_protocol.py:219: need --runslow option to run +SKIPPED [1] distributed/shuffle/tests/test_rechunk.py:1326: need --runslow option to run +SKIPPED [1] distributed/tests/test_active_memory_manager.py:1304: need --runslow option to run +SKIPPED [1] distributed/tests/test_active_memory_manager.py:1317: need --runslow option to run +SKIPPED [1] distributed/tests/test_active_memory_manager.py:1339: need --runslow option to run +SKIPPED [2] distributed/tests/test_active_memory_manager.py:1360: need --runslow option to run +SKIPPED [1] distributed/tests/test_batched.py:158: need --runslow option to run +SKIPPED [1] distributed/tests/test_batched.py:228: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:859: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:1707: Upstream issue in CPython. See https://github.com/dask/distributed/issues/8708 and https://github.com/python/cpython/issues/121342. +SKIPPED [1] distributed/tests/test_client.py:1736: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:1870: unconditional skip +SKIPPED [1] distributed/tests/test_client.py:2088: unconditional skip +SKIPPED [1] distributed/tests/test_client.py:2677: Use fast random selection now +SKIPPED [1] distributed/tests/test_client.py:3301: unconditional skip +SKIPPED [1] distributed/tests/test_client.py:3585: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:3650: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:3762: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:4563: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:4667: Now prefer first-in-first-out +SKIPPED [1] distributed/tests/test_client.py:4697: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:4804: need --runslow option to run +SKIPPED [2] distributed/tests/test_client.py:4849: need --runslow option to run +SKIPPED [2] distributed/tests/test_client.py:4869: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:4896: need --runslow option to run +SKIPPED [2] distributed/tests/test_client.py:4909: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:5097: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:5118: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:5417: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:5643: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:5844: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:5961: unconditional skip +SKIPPED [1] distributed/tests/test_client.py:6179: could not import 'bokeh.plotting': No module named 'bokeh' +SKIPPED [1] distributed/tests/test_client.py:6540: known intermittent failure +SKIPPED [2] distributed/tests/test_client.py:6621: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:6695: need --runslow option to run +SKIPPED [1] 
distributed/tests/test_client.py:7065: could not import 'bokeh': No module named 'bokeh' +SKIPPED [2] distributed/tests/test_client.py:7166: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:7265: numpy >=1.25 can capture ufunc code +SKIPPED [1] distributed/tests/test_client.py:7413: need --runslow option to run +SKIPPED [2] distributed/tests/test_client.py:8031: need --runslow option to run +SKIPPED [1] distributed/tests/test_client.py:8235: need --runslow option to run +SKIPPED [2] distributed/tests/test_client.py:8286: need --runslow option to run +SKIPPED [1] distributed/tests/test_client_executor.py:146: need --runslow option to run +SKIPPED [1] distributed/tests/test_config.py:356: could not import 'uvloop': No module named 'uvloop' +SKIPPED [1] distributed/tests/test_core.py:450: need --runslow option to run +SKIPPED [1] distributed/tests/test_core.py:915: could not import 'crick': No module named 'crick' +SKIPPED [1] distributed/tests/test_core.py:924: could not import 'crick': No module named 'crick' +SKIPPED [1] distributed/tests/test_core.py:1386: need --runslow option to run +SKIPPED [1] distributed/tests/test_counter.py:13: no crick library +SKIPPED [1] distributed/tests/test_dask_collections.py:221: could not import 'sparse': No module named 'sparse' +SKIPPED [1] distributed/tests/test_diskutils.py:224: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:41: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:117: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:128: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:287: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:358: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:442: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:454: need --runslow option to run +SKIPPED [2] distributed/tests/test_failed_workers.py:491: need --runslow option to run +SKIPPED [1] distributed/tests/test_failed_workers.py:537: need --runslow option to run +SKIPPED [1] distributed/tests/test_jupyter.py:48: need --runslow option to run +SKIPPED [1] distributed/tests/test_metrics.py:30: WindowsTime doesn't work with high accuracy base timer +SKIPPED [1] distributed/tests/test_nanny.py:93: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:107: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:141: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:291: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:490: need --runslow option to run +SKIPPED [2] distributed/tests/test_nanny.py:567: could not import 'ucp': No module named 'ucp' +SKIPPED [1] distributed/tests/test_nanny.py:682: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:690: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:731: need --runslow option to run +SKIPPED [1] distributed/tests/test_nanny.py:774: need --runslow option to run +SKIPPED [1] distributed/tests/test_profile.py:75: could not import 'stacktrace': No module named 'stacktrace' +SKIPPED [1] distributed/tests/test_queues.py:114: need --runslow option to run +SKIPPED [1] distributed/tests/test_resources.py:363: Skipped +SKIPPED [1] distributed/tests/test_resources.py:422: Should protect resource keys from optimization +SKIPPED [1] 
distributed/tests/test_resources.py:445: atop fusion seemed to break this +SKIPPED [1] distributed/tests/test_scheduler.py:286: Not relevant with queuing on; see https://github.com/dask/distributed/issues/7204 +SKIPPED [1] distributed/tests/test_scheduler.py:421: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1075: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1103: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1124: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1216: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1234: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1625: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1680: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1697: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1860: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:1900: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2095: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2356: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2481: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2508: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2556: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2606: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:2661: could not import 'bokeh': No module named 'bokeh' +SKIPPED [1] distributed/tests/test_scheduler.py:3112: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:3512: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:3691: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:3723: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:3866: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:3923: need --runslow option to run +SKIPPED [1] distributed/tests/test_scheduler.py:3943: need --runslow option to run +SKIPPED [1] distributed/tests/test_steal.py:106: need --runslow option to run +SKIPPED [1] distributed/tests/test_steal.py:285: Skipped +SKIPPED [1] distributed/tests/test_steal.py:1287: executing heartbeats not considered yet +SKIPPED [1] distributed/tests/test_steal.py:1353: need --runslow option to run +SKIPPED [2] distributed/tests/test_stress.py:51: need --runslow option to run +SKIPPED [1] distributed/tests/test_stress.py:100: need --runslow option to run +SKIPPED [1] distributed/tests/test_stress.py:209: unconditional skip +SKIPPED [1] distributed/tests/test_stress.py:240: need --runslow option to run +SKIPPED [1] distributed/tests/test_stress.py:269: need --runslow option to run +SKIPPED [1] distributed/tests/test_stress.py:310: need --runslow option to run +SKIPPED [1] distributed/tests/test_system_monitor.py:111: could not import 'gilknocker': No module named 'gilknocker' +SKIPPED [1] distributed/tests/test_utils.py:166: could not import 'IPython': No module named 'IPython' +SKIPPED [1] distributed/tests/test_utils.py:347: could not import 'pyarrow': No module named 'pyarrow' +SKIPPED [1] distributed/tests/test_utils_test.py:146: This hangs on travis +SKIPPED [1] 
distributed/tests/test_utils_test.py:721: need --runslow option to run +SKIPPED [1] distributed/tests/test_variable.py:205: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:219: don't yet support uploading pyc files +SKIPPED [1] distributed/tests/test_worker.py:309: could not import 'crick': No module named 'crick' +SKIPPED [1] distributed/tests/test_worker.py:345: need --runslow option to run +SKIPPED [8] distributed/tests/test_worker.py:420: need --runslow option to run +SKIPPED [5] distributed/tests/test_worker.py:562: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1180: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1264: need --runslow option to run +SKIPPED [2] distributed/tests/test_worker.py:1453: could not import 'ucp': No module named 'ucp' +SKIPPED [1] distributed/tests/test_worker.py:1517: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1549: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1580: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1607: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1635: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1702: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:1806: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:2705: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:3399: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker.py:3429: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker_memory.py:871: need --runslow option to run +SKIPPED [2] distributed/tests/test_worker_memory.py:883: need --runslow option to run +SKIPPED [1] distributed/tests/test_worker_memory.py:997: need --runslow option to run +FAILED distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp - assert 2 == 1 + + where 2 = len([, ]) +FAILED distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures - OSError: Timed out trying to connect to tcp://127.0.0.1:35317 after 5 s +FAILED distributed/tests/test_tls_functional.py::test_nanny - TimeoutError +FAILED distributed/tests/test_worker.py::test_log_remove_worker - AssertionError: assert {'tcp://127.0.0.1:41087': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'all': [{'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'stimulus_id': 'graceful', 'expected': True, 'worker': 'tcp://127.0.0.1:41087'}, {'action': 'retire-workers', 'retired': {'tcp://127.0.0.1:41087': 'snip'}, 'could-not-retire': {}, 'stimulus_id': 'graceful'}, {'action': 'retire-workers', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:45237': 'snip'}, 'stimulus_id': 'graceful_abort'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False, 'worker': 'tcp://127.0.0.1:45237'}], 'tcp://127.0.0.1:45237': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 
'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'processing-tasks': {'y'}, 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'stimulus_id': 'ungraceful', 'expected': False}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:41087'}, {'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} == {'tcp://127.0.0.1:41087': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful'}, {'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful'}, {'action': 'retired', 'stimulus_id': 'graceful'}], 'tcp://127.0.0.1:45237': [{'action': 'worker-status-change', 'prev-status': 'running', 'status': 'closing_gracefully', 'stimulus_id': 'graceful_abort'}, {'action': 'could-not-retire', 'stimulus_id': 'graceful_abort'}, {'action': 'worker-status-change', 'prev-status': 'closing_gracefully', 'status': 'running', 'stimulus_id': 'worker-status-change'}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful'}, {'action': 'closing-worker', 'reason': 'scheduler-remove-worker'}], 'all': [{'action': 'remove-worker', 'lost-computed-tasks': set(), 'lost-scattered-tasks': set(), 'processing-tasks': {'y'}, 'expected': True, 'stimulus_id': 'graceful', 'worker': 'tcp://127.0.0.1:41087'}, {'action': 'retire-workers', 'stimulus_id': 'graceful', 'retired': {'tcp://127.0.0.1:41087': 'snip'}, 'could-not-retire': {}}, {'action': 'retire-workers', 'stimulus_id': 'graceful_abort', 'retired': {}, 'could-not-retire': {'tcp://127.0.0.1:45237': 'snip'}}, {'action': 'remove-worker', 'lost-computed-tasks': {'x'}, 'lost-scattered-tasks': {'z'}, 'processing-tasks': {'y'}, 'expected': False, 'stimulus_id': 'ungraceful', 'worker': 'tcp://127.0.0.1:45237'}], 'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} + + Common items: + {'all': [{'action': 'remove-worker', + 'expected': True, + 'lost-computed-tasks': set(), + 'lost-scattered-tasks': set(), + 'processing-tasks': {'y'}, + 'stimulus_id': 'graceful', + 'worker': 'tcp://127.0.0.1:41087'}, + {'action': 'retire-workers', + 'could-not-retire': {}, + 'retired': {'tcp://127.0.0.1:41087': 'snip'}, + 'stimulus_id': 'graceful'}, + {'action': 'retire-workers', + 'could-not-retire': {'tcp://127.0.0.1:45237': 'snip'}, + 'retired': {}, + 'stimulus_id': 'graceful_abort'}, + {'action': 'remove-worker', + 'expected': False, + 'lost-computed-tasks': {'x'}, + 'lost-scattered-tasks': {'z'}, + 'processing-tasks': {'y'}, + 'stimulus_id': 'ungraceful', + 'worker': 'tcp://127.0.0.1:45237'}], + 'tcp://127.0.0.1:41087': [{'action': 'worker-status-change', + 'prev-status': 'running', + 'status': 'closing_gracefully', + 'stimulus_id': 'graceful'}, + {'action': 'remove-worker', + 'expected': True, + 'lost-computed-tasks': set(), + 'lost-scattered-tasks': set(), + 'processing-tasks': {'y'}, + 'stimulus_id': 'graceful'}, + {'action': 'retired', 'stimulus_id': 'graceful'}], + 
'tcp://127.0.0.1:45237': [{'action': 'worker-status-change', + 'prev-status': 'running', + 'status': 'closing_gracefully', + 'stimulus_id': 'graceful_abort'}, + {'action': 'could-not-retire', + 'stimulus_id': 'graceful_abort'}, + {'action': 'worker-status-change', + 'prev-status': 'closing_gracefully', + 'status': 'running', + 'stimulus_id': 'worker-status-change'}, + {'action': 'remove-worker', + 'expected': False, + 'lost-computed-tasks': {'x'}, + 'lost-scattered-tasks': {'z'}, + 'processing-tasks': {'y'}, + 'stimulus_id': 'ungraceful'}, + {'action': 'closing-worker', + 'reason': 'scheduler-remove-worker'}]} + Differing items: + {'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:41087'}, {'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} != {'worker-get-client': [{'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', 'timeout': 5, 'worker': 'tcp://127.0.0.1:45237'}]} + + Full diff: + { + 'all': [ + { + 'action': 'remove-worker', + 'expected': True, + 'lost-computed-tasks': set(), + 'lost-scattered-tasks': set(), + 'processing-tasks': { + 'y', + }, + 'stimulus_id': 'graceful', + 'worker': 'tcp://127.0.0.1:41087', + }, + { + 'action': 'retire-workers', + 'could-not-retire': {}, + 'retired': { + 'tcp://127.0.0.1:41087': 'snip', + }, + 'stimulus_id': 'graceful', + }, + { + 'action': 'retire-workers', + 'could-not-retire': { + 'tcp://127.0.0.1:45237': 'snip', + }, + 'retired': {}, + 'stimulus_id': 'graceful_abort', + }, + { + 'action': 'remove-worker', + 'expected': False, + 'lost-computed-tasks': { + 'x', + }, + 'lost-scattered-tasks': { + 'z', + }, + 'processing-tasks': { + 'y', + }, + 'stimulus_id': 'ungraceful', + 'worker': 'tcp://127.0.0.1:45237', + }, + ], + 'tcp://127.0.0.1:41087': [ + { + 'action': 'worker-status-change', + 'prev-status': 'running', + 'status': 'closing_gracefully', + 'stimulus_id': 'graceful', + }, + { + 'action': 'remove-worker', + 'expected': True, + 'lost-computed-tasks': set(), + 'lost-scattered-tasks': set(), + 'processing-tasks': { + 'y', + }, + 'stimulus_id': 'graceful', + }, + { + 'action': 'retired', + 'stimulus_id': 'graceful', + }, + ], + 'tcp://127.0.0.1:45237': [ + { + 'action': 'worker-status-change', + 'prev-status': 'running', + 'status': 'closing_gracefully', + 'stimulus_id': 'graceful_abort', + }, + { + 'action': 'could-not-retire', + 'stimulus_id': 'graceful_abort', + }, + { + 'action': 'worker-status-change', + 'prev-status': 'closing_gracefully', + 'status': 'running', + 'stimulus_id': 'worker-status-change', + }, + { + 'action': 'remove-worker', + 'expected': False, + 'lost-computed-tasks': { + 'x', + }, + 'lost-scattered-tasks': { + 'z', + }, + 'processing-tasks': { + 'y', + }, + 'stimulus_id': 'ungraceful', + }, + { + 'action': 'closing-worker', + 'reason': 'scheduler-remove-worker', + }, + ], + 'worker-get-client': [ + { + 'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', + 'timeout': 5, + + 'worker': 'tcp://127.0.0.1:41087', + + }, + + { + + 'client': 'Client-2cb2989b-fe83-11ef-9228-ad3bd1b43ef1', + + 'timeout': 5, + 'worker': 'tcp://127.0.0.1:45237', + }, + ], + } +FAILED distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill - TimeoutError +FAILED distributed/tests/test_worker_memory.py::test_pause_while_spilling - Failed: condition not reached until 5 seconds +FAILED distributed/tests/test_worker_memory.py::test_pause_while_idle - Failed: condition not reached until 5 seconds +FAILED 
distributed/tests/test_worker_memory.py::test_pause_while_saturated - Failed: condition not reached until 5 seconds += 8 failed, 2904 passed, 265 skipped, 222 deselected, 15 xfailed, 8 xpassed, 3 rerun in 3601.38s (1:00:01) = +*** END OF RUN 1: NOT ALL TESTS HAVE YET PASSED/XFAILED *** +*** STARTING RUN 2: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls 
--deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers --deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation 
--deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error --deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional 
--deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send --deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto 
--deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources --deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task +============================= test session starts ============================== +platform linux -- Python 3.13.2, pytest-8.3.4, pluggy-1.5.0 -- /usr/bin/python3.13 +cachedir: .pytest_cache +rootdir: /build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build +configfile: pyproject.toml +plugins: timeout-2.3.1, rerunfailures-15.0, anyio-4.7.0, typeguard-4.4.2 +timeout: 300.0s +timeout method: signal +timeout func_only: False +collecting ... 
collected 12 items / 4 deselected / 8 selected +run-last-failure: rerun previous 8 failures (skipped 145 files) + +distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp PASSED [ 12%] +distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures PASSED [ 25%] +distributed/tests/test_tls_functional.py::test_nanny FAILED [ 37%] +distributed/tests/test_worker.py::test_log_remove_worker PASSED [ 50%] +distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill PASSED [ 62%] +distributed/tests/test_worker_memory.py::test_pause_while_spilling PASSED [ 75%] +distributed/tests/test_worker_memory.py::test_pause_while_idle PASSED [ 87%] +distributed/tests/test_worker_memory.py::test_pause_while_saturated PASSED [100%] + +=================================== FAILURES =================================== +__________________________________ test_nanny __________________________________ + +fut = , timeout = 0 async def wait_for(fut: Awaitable[T], timeout: float) -> T: async with asyncio.timeout(timeout): @@ -9108,10 +10731,10 @@ stream = await self.client.connect( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ -self = -host = '127.0.0.1', port = 41121, af = -ssl_options = -max_buffer_size = 31544631296.0, source_ip = None, source_port = None +self = +host = '127.0.0.1', port = 34531, af = +ssl_options = +max_buffer_size = 31544633344.0, source_ip = None, source_port = None timeout = None async def connect( @@ -9207,7 +10830,7 @@ self = exc_type = -exc_val = CancelledError(), exc_tb = +exc_val = CancelledError(), exc_tb = async def __aexit__( self, @@ -9292,7 +10915,7 @@ During handling of the above exception, another exception occurred: -fut = ._..test_func..async_fn at 0xffff477f5a80> +fut = ._..test_func..async_fn at 0xffff9e122880> timeout = 60 async def wait_for(fut: Awaitable[T], timeout: float) -> T: @@ -9364,7 +10987,7 @@ self = exc_type = -exc_val = CancelledError(), exc_tb = +exc_val = CancelledError(), exc_tb = async def __aexit__( self, @@ -9390,142 +11013,89 @@ /usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError ----------------------------- Captured stderr call ----------------------------- -2026-04-13 07:26:30,078 - distributed.scheduler - INFO - State start -2026-04-13 07:26:30,092 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34455 -2026-04-13 07:26:30,094 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44277/status -2026-04-13 07:26:30,100 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:26:30,163 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:46095' -2026-04-13 07:26:30,191 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:39739' -2026-04-13 07:26:31,156 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089262592 due to system memory limit of 58.76 GiB -2026-04-13 07:26:31,177 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:34433 -2026-04-13 07:26:31,177 - distributed.worker - INFO - Listening to: tls://127.0.0.1:34433 -2026-04-13 07:26:31,177 - distributed.worker - INFO - Worker name: 1 -2026-04-13 07:26:31,178 - distributed.worker - INFO - dashboard at: 127.0.0.1:43107 -2026-04-13 07:26:31,178 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:34455 -2026-04-13 07:26:31,178 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:26:31,178 - distributed.worker - INFO - Threads: 2 -2026-04-13 
07:26:31,178 - distributed.worker - INFO - Memory: 58.76 GiB -2026-04-13 07:26:31,178 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-79oejwin -2026-04-13 07:26:31,178 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:26:31,615 - distributed.worker.memory - WARNING - Ignoring provided memory limit 63089262592 due to system memory limit of 58.76 GiB -2026-04-13 07:26:31,665 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:33607 -2026-04-13 07:26:31,666 - distributed.worker - INFO - Listening to: tls://127.0.0.1:33607 -2026-04-13 07:26:31,666 - distributed.worker - INFO - Worker name: 0 -2026-04-13 07:26:31,666 - distributed.worker - INFO - dashboard at: 127.0.0.1:33315 -2026-04-13 07:26:31,666 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:34455 -2026-04-13 07:26:31,666 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:26:31,666 - distributed.worker - INFO - Threads: 1 -2026-04-13 07:26:31,666 - distributed.worker - INFO - Memory: 58.76 GiB -2026-04-13 07:26:31,666 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-2ilxr80v -2026-04-13 07:26:31,666 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:26:31,890 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:34433 name: 1 -2026-04-13 07:26:31,956 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:34433 -2026-04-13 07:26:31,957 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:52372 -2026-04-13 07:26:31,959 - distributed.worker - INFO - Starting Worker plugin shuffle -2026-04-13 07:26:31,960 - distributed.worker - INFO - Registered to: tls://127.0.0.1:34455 -2026-04-13 07:26:31,960 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:26:31,978 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:34455 -2026-04-13 07:26:32,348 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:33607 name: 0 -2026-04-13 07:26:32,386 - distributed.worker - INFO - Starting Worker plugin shuffle -2026-04-13 07:26:32,387 - distributed.worker - INFO - Registered to: tls://127.0.0.1:34455 -2026-04-13 07:26:32,387 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:26:32,394 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:33607 -2026-04-13 07:26:32,396 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:52378 -2026-04-13 07:26:32,397 - distributed.core - INFO - Connection to tls://127.0.0.1:52378 has been closed. -2026-04-13 07:26:32,397 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:33607 name: 0 (stimulus_id='handle-worker-cleanup-1776108392.397511') -2026-04-13 07:26:32,405 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:34455 -2026-04-13 07:26:32,405 - distributed.core - INFO - Connection to tls://127.0.0.1:34455 has been closed. -2026-04-13 07:26:32,406 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:33607. Reason: worker-handle-scheduler-connection-broken -2026-04-13 07:26:32,429 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:46095'. 
Reason: worker-handle-scheduler-connection-broken -2026-04-13 07:26:32,430 - distributed.worker - INFO - Removing Worker plugin shuffle -2026-04-13 07:26:32,447 - distributed.nanny - INFO - Worker closed -2026-04-13 07:26:35,075 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46095'. Reason: nanny-close-gracefully -2026-04-13 07:26:35,075 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46095' closed. -2026-04-13 07:27:02,415 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39739'. Reason: nanny-close -2026-04-13 07:27:02,415 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close -2026-04-13 07:27:02,424 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:34433. Reason: nanny-close -2026-04-13 07:27:02,424 - distributed.worker - INFO - Removing Worker plugin shuffle -2026-04-13 07:27:02,426 - distributed.core - INFO - Connection to tls://127.0.0.1:34455 has been closed. -2026-04-13 07:27:02,427 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:52372; closing. -2026-04-13 07:27:02,428 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:34433 name: 1 (stimulus_id='handle-worker-cleanup-1776108422.427955') -2026-04-13 07:27:02,430 - distributed.scheduler - INFO - Lost all workers -2026-04-13 07:27:02,439 - distributed.nanny - INFO - Worker closed -2026-04-13 07:27:02,831 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39739' closed. -2026-04-13 07:27:02,831 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown -2026-04-13 07:27:02,833 - distributed.scheduler - INFO - Scheduler closing all comms -2026-04-13 07:27:02,835 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying +2025-03-12 04:19:24,567 - distributed.scheduler - INFO - State start +2025-03-12 04:19:24,591 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45549 +2025-03-12 04:19:24,591 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35957/status +2025-03-12 04:19:24,591 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:19:24,736 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:41285' +2025-03-12 04:19:24,826 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:35819' +2025-03-12 04:19:26,768 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:41603 +2025-03-12 04:19:26,769 - distributed.worker - INFO - Listening to: tls://127.0.0.1:41603 +2025-03-12 04:19:26,769 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:19:26,769 - distributed.worker - INFO - dashboard at: 127.0.0.1:41779 +2025-03-12 04:19:26,769 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:45549 +2025-03-12 04:19:26,769 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:19:26,769 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:19:26,769 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:19:26,769 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-2pm9kwqu +2025-03-12 04:19:26,769 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:19:26,909 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:45735 +2025-03-12 04:19:26,909 - distributed.worker - INFO - Listening to: tls://127.0.0.1:45735 +2025-03-12 04:19:26,909 - distributed.worker - INFO - Worker name: 1 +2025-03-12 04:19:26,909 - distributed.worker - INFO - dashboard 
at: 127.0.0.1:40135 +2025-03-12 04:19:26,918 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:45549 +2025-03-12 04:19:26,918 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:19:26,918 - distributed.worker - INFO - Threads: 2 +2025-03-12 04:19:26,918 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:19:26,918 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-ly_rcuqn +2025-03-12 04:19:26,918 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:19:27,595 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:41603 name: 0 +2025-03-12 04:19:27,596 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:41603 +2025-03-12 04:19:27,596 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40992 +2025-03-12 04:19:27,597 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:19:27,598 - distributed.worker - INFO - Registered to: tls://127.0.0.1:45549 +2025-03-12 04:19:27,598 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:19:27,618 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:45549 +2025-03-12 04:19:27,922 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:45735 name: 1 +2025-03-12 04:19:27,923 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:45735 +2025-03-12 04:19:27,923 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40994 +2025-03-12 04:19:27,923 - distributed.core - INFO - Connection to tls://127.0.0.1:40994 has been closed. +2025-03-12 04:19:27,923 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:45735 name: 1 (stimulus_id='handle-worker-cleanup-1741702767.9237204') +2025-03-12 04:19:27,926 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:19:27,927 - distributed.batched - INFO - Batched Comm Closed Scheduler local=tls://127.0.0.1:40994 remote=tls://127.0.0.1:45549> Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster - raise TimeoutError("Cluster creation timeout") -TimeoutError: Cluster creation timeout -2026-04-13 07:27:03,844 - distributed.scheduler - INFO - State start -2026-04-13 07:27:03,850 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36803 -2026-04-13 07:27:03,856 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38505/status -2026-04-13 07:27:03,857 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:03,880 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46473'. Reason: failure-to-start- -2026-04-13 07:27:03,880 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46473' closed. -2026-04-13 07:27:03,880 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45843'. Reason: failure-to-start- -2026-04-13 07:27:03,880 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45843' closed. 
-2026-04-13 07:27:03,881 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36803': TLS handshake failed with remote 'tls://127.0.0.1:57224': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:03,881 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36803': TLS handshake failed with remote 'tls://127.0.0.1:57230': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:03,881 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -asyncio.exceptions.CancelledError + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 297, in write + raise StreamClosedError() +tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError - -The above exception was the direct cause of the following exception: - + File 
"/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send + nbytes = yield coro + ^^^^^^^^^^ + File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run + value = future.result() + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 307, in write + convert_stream_closed_error(self, e) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error + raise CommClosedError(f"in {obj}: {exc}") from exc +distributed.comm.core.CommClosedError: in Scheduler local=tls://127.0.0.1:40994 remote=tls://127.0.0.1:45549>: Stream is closed +2025-03-12 04:19:27,929 - distributed.worker - INFO - Registered to: tls://127.0.0.1:45549 +2025-03-12 04:19:27,929 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:19:27,963 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:45549 +2025-03-12 04:19:27,963 - distributed.core - INFO - Connection to tls://127.0.0.1:45549 has been closed. +2025-03-12 04:19:27,963 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:45735. Reason: worker-handle-scheduler-connection-broken +2025-03-12 04:19:28,006 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:35819'. Reason: worker-handle-scheduler-connection-broken +2025-03-12 04:19:28,007 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:19:28,030 - distributed.nanny - INFO - Worker closed +2025-03-12 04:19:30,074 - distributed.nanny - ERROR - Worker process died unexpectedly +2025-03-12 04:19:31,088 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35819'. Reason: nanny-close-gracefully +2025-03-12 04:19:31,088 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35819' closed. +2025-03-12 04:19:57,998 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41285'. Reason: nanny-close +2025-03-12 04:19:57,998 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close +2025-03-12 04:19:58,006 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:41603. Reason: nanny-close +2025-03-12 04:19:58,006 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:19:58,008 - distributed.core - INFO - Connection to tls://127.0.0.1:45549 has been closed. +2025-03-12 04:19:58,009 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:40992; closing. +2025-03-12 04:19:58,009 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:41603 name: 0 (stimulus_id='handle-worker-cleanup-1741702798.0096931') +2025-03-12 04:19:58,009 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:19:58,023 - distributed.nanny - INFO - Worker closed +2025-03-12 04:19:58,798 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41285' closed. +2025-03-12 04:19:58,798 - distributed.scheduler - INFO - Closing scheduler. 
Reason: unknown +2025-03-12 04:19:58,798 - distributed.scheduler - INFO - Scheduler closing all comms +2025-03-12 04:19:58,799 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory s, ws = await start_cluster( @@ -9533,27 +11103,20 @@ ...<9 lines>... ) ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:04,892 - distributed.scheduler - INFO - State start -2026-04-13 07:27:04,897 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34593 -2026-04-13 07:27:04,908 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35503/status -2026-04-13 07:27:04,909 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:04,927 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36939'. Reason: failure-to-start- -2026-04-13 07:27:04,928 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36939' closed. -2026-04-13 07:27:04,928 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34025'. Reason: failure-to-start- -2026-04-13 07:27:04,928 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34025' closed. -2026-04-13 07:27:04,928 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34593': TLS handshake failed with remote 'tls://127.0.0.1:49496': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:04,928 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34593': TLS handshake failed with remote 'tls://127.0.0.1:49500': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:04,928 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying + File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster + raise TimeoutError("Cluster creation timeout") +TimeoutError: Cluster creation timeout +2025-03-12 04:19:59,803 - distributed.scheduler - INFO - State start +2025-03-12 04:19:59,819 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36663 +2025-03-12 04:19:59,819 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36217/status +2025-03-12 04:19:59,819 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:19:59,871 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45947'. Reason: failure-to-start- +2025-03-12 04:19:59,871 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45947' closed. +2025-03-12 04:19:59,871 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36549'. 
Reason: failure-to-start- +2025-03-12 04:19:59,872 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36549' closed. +2025-03-12 04:19:59,872 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36663': TLS handshake failed with remote 'tls://127.0.0.1:50398': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:19:59,872 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36663': TLS handshake failed with remote 'tls://127.0.0.1:50414': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:19:59,872 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -9622,17 +11185,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:05,947 - distributed.scheduler - INFO - State start -2026-04-13 07:27:05,961 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44903 -2026-04-13 07:27:05,963 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37759/status -2026-04-13 07:27:05,964 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:05,990 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40033'. Reason: failure-to-start- -2026-04-13 07:27:05,991 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40033' closed. -2026-04-13 07:27:05,991 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44045'. Reason: failure-to-start- -2026-04-13 07:27:05,991 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44045' closed. -2026-04-13 07:27:05,991 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44903': TLS handshake failed with remote 'tls://127.0.0.1:40524': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:05,991 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44903': TLS handshake failed with remote 'tls://127.0.0.1:40528': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:05,992 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:00,895 - distributed.scheduler - INFO - State start +2025-03-12 04:20:00,918 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38727 +2025-03-12 04:20:00,919 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38089/status +2025-03-12 04:20:00,919 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:00,976 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38405'. Reason: failure-to-start- +2025-03-12 04:20:00,976 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38405' closed. +2025-03-12 04:20:00,977 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42785'. Reason: failure-to-start- +2025-03-12 04:20:00,977 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42785' closed. 
+2025-03-12 04:20:00,977 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38727': TLS handshake failed with remote 'tls://127.0.0.1:45548': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:00,977 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38727': TLS handshake failed with remote 'tls://127.0.0.1:45554': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:00,977 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -9701,17 +11264,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:07,000 - distributed.scheduler - INFO - State start -2026-04-13 07:27:07,006 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34171 -2026-04-13 07:27:07,016 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43657/status -2026-04-13 07:27:07,018 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:07,035 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46803'. Reason: failure-to-start- -2026-04-13 07:27:07,036 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46803' closed. -2026-04-13 07:27:07,036 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36233'. Reason: failure-to-start- -2026-04-13 07:27:07,036 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36233' closed. -2026-04-13 07:27:07,036 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34171': TLS handshake failed with remote 'tls://127.0.0.1:43142': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:07,036 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34171': TLS handshake failed with remote 'tls://127.0.0.1:43150': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:07,036 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:01,999 - distributed.scheduler - INFO - State start +2025-03-12 04:20:02,015 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38177 +2025-03-12 04:20:02,016 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45129/status +2025-03-12 04:20:02,016 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:02,052 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34765'. Reason: failure-to-start- +2025-03-12 04:20:02,052 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34765' closed. +2025-03-12 04:20:02,052 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44299'. Reason: failure-to-start- +2025-03-12 04:20:02,052 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44299' closed. 
+2025-03-12 04:20:02,053 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38177': TLS handshake failed with remote 'tls://127.0.0.1:59146': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:02,053 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38177': TLS handshake failed with remote 'tls://127.0.0.1:59150': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:02,053 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -9780,17 +11343,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:08,052 - distributed.scheduler - INFO - State start -2026-04-13 07:27:08,085 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45887 -2026-04-13 07:27:08,087 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41869/status -2026-04-13 07:27:08,089 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:08,115 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36737'. Reason: failure-to-start- -2026-04-13 07:27:08,116 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36737' closed. -2026-04-13 07:27:08,116 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40885'. Reason: failure-to-start- -2026-04-13 07:27:08,116 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40885' closed. -2026-04-13 07:27:08,116 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45887': TLS handshake failed with remote 'tls://127.0.0.1:52740': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:08,116 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45887': TLS handshake failed with remote 'tls://127.0.0.1:52750': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:08,117 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:03,075 - distributed.scheduler - INFO - State start +2025-03-12 04:20:03,087 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45027 +2025-03-12 04:20:03,087 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36009/status +2025-03-12 04:20:03,087 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:03,112 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39641'. Reason: failure-to-start- +2025-03-12 04:20:03,112 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39641' closed. +2025-03-12 04:20:03,112 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45281'. Reason: failure-to-start- +2025-03-12 04:20:03,112 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45281' closed. 
+2025-03-12 04:20:03,113 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45027': TLS handshake failed with remote 'tls://127.0.0.1:45324': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:03,113 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45027': TLS handshake failed with remote 'tls://127.0.0.1:45326': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:03,113 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -9859,17 +11422,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:09,124 - distributed.scheduler - INFO - State start -2026-04-13 07:27:09,130 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44539 -2026-04-13 07:27:09,140 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45529/status -2026-04-13 07:27:09,142 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:09,160 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41703'. Reason: failure-to-start- -2026-04-13 07:27:09,160 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41703' closed. -2026-04-13 07:27:09,160 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33511'. Reason: failure-to-start- -2026-04-13 07:27:09,160 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33511' closed. -2026-04-13 07:27:09,161 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44539': TLS handshake failed with remote 'tls://127.0.0.1:37732': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:09,161 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44539': TLS handshake failed with remote 'tls://127.0.0.1:37738': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:09,161 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:04,136 - distributed.scheduler - INFO - State start +2025-03-12 04:20:04,139 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43581 +2025-03-12 04:20:04,139 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:32777/status +2025-03-12 04:20:04,140 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:04,164 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42851'. Reason: failure-to-start- +2025-03-12 04:20:04,164 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42851' closed. +2025-03-12 04:20:04,164 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40837'. Reason: failure-to-start- +2025-03-12 04:20:04,164 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40837' closed. 
+2025-03-12 04:20:04,165 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43581': TLS handshake failed with remote 'tls://127.0.0.1:34570': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:04,165 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43581': TLS handshake failed with remote 'tls://127.0.0.1:34584': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:04,165 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -9938,17 +11501,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:10,176 - distributed.scheduler - INFO - State start -2026-04-13 07:27:10,190 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45155 -2026-04-13 07:27:10,192 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35903/status -2026-04-13 07:27:10,194 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:10,225 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44939'. Reason: failure-to-start- -2026-04-13 07:27:10,225 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44939' closed. -2026-04-13 07:27:10,225 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39745'. Reason: failure-to-start- -2026-04-13 07:27:10,225 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39745' closed. -2026-04-13 07:27:10,226 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45155': TLS handshake failed with remote 'tls://127.0.0.1:46386': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:10,226 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45155': TLS handshake failed with remote 'tls://127.0.0.1:46400': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:10,226 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:05,187 - distributed.scheduler - INFO - State start +2025-03-12 04:20:05,195 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40783 +2025-03-12 04:20:05,195 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39175/status +2025-03-12 04:20:05,195 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:05,224 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42409'. Reason: failure-to-start- +2025-03-12 04:20:05,224 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42409' closed. +2025-03-12 04:20:05,224 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41415'. Reason: failure-to-start- +2025-03-12 04:20:05,224 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41415' closed. 
+2025-03-12 04:20:05,225 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40783': TLS handshake failed with remote 'tls://127.0.0.1:47292': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:05,225 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40783': TLS handshake failed with remote 'tls://127.0.0.1:47306': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:05,225 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10017,17 +11580,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:11,232 - distributed.scheduler - INFO - State start -2026-04-13 07:27:11,246 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38469 -2026-04-13 07:27:11,248 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33391/status -2026-04-13 07:27:11,250 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:11,269 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36071'. Reason: failure-to-start- -2026-04-13 07:27:11,270 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36071' closed. -2026-04-13 07:27:11,270 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36013'. Reason: failure-to-start- -2026-04-13 07:27:11,270 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36013' closed. -2026-04-13 07:27:11,279 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38469': TLS handshake failed with remote 'tls://127.0.0.1:53354': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:11,279 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38469': TLS handshake failed with remote 'tls://127.0.0.1:53364': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:11,279 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:06,239 - distributed.scheduler - INFO - State start +2025-03-12 04:20:06,251 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39149 +2025-03-12 04:20:06,251 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33283/status +2025-03-12 04:20:06,251 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:06,286 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38111'. Reason: failure-to-start- +2025-03-12 04:20:06,286 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38111' closed. +2025-03-12 04:20:06,286 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36001'. Reason: failure-to-start- +2025-03-12 04:20:06,286 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36001' closed. 
+2025-03-12 04:20:06,287 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39149': TLS handshake failed with remote 'tls://127.0.0.1:46694': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:06,287 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39149': TLS handshake failed with remote 'tls://127.0.0.1:46696': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:06,287 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10096,17 +11659,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:12,284 - distributed.scheduler - INFO - State start -2026-04-13 07:27:12,298 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39605 -2026-04-13 07:27:12,300 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37803/status -2026-04-13 07:27:12,302 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:12,325 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39255'. Reason: failure-to-start- -2026-04-13 07:27:12,325 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39255' closed. -2026-04-13 07:27:12,325 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43939'. Reason: failure-to-start- -2026-04-13 07:27:12,325 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43939' closed. -2026-04-13 07:27:12,326 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39605': TLS handshake failed with remote 'tls://127.0.0.1:54164': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:12,326 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39605': TLS handshake failed with remote 'tls://127.0.0.1:54168': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:12,326 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:07,291 - distributed.scheduler - INFO - State start +2025-03-12 04:20:07,303 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42861 +2025-03-12 04:20:07,303 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42525/status +2025-03-12 04:20:07,303 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:07,346 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43075'. Reason: failure-to-start- +2025-03-12 04:20:07,346 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43075' closed. +2025-03-12 04:20:07,346 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38863'. Reason: failure-to-start- +2025-03-12 04:20:07,346 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38863' closed. 
+2025-03-12 04:20:07,347 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42861': TLS handshake failed with remote 'tls://127.0.0.1:48152': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:07,347 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42861': TLS handshake failed with remote 'tls://127.0.0.1:48158': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:07,347 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10175,17 +11738,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:13,340 - distributed.scheduler - INFO - State start -2026-04-13 07:27:13,354 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41087 -2026-04-13 07:27:13,357 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36299/status -2026-04-13 07:27:13,358 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:13,370 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33203'. Reason: failure-to-start- -2026-04-13 07:27:13,370 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33203' closed. -2026-04-13 07:27:13,371 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45187'. Reason: failure-to-start- -2026-04-13 07:27:13,371 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45187' closed. -2026-04-13 07:27:13,371 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41087': TLS handshake failed with remote 'tls://127.0.0.1:55672': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:13,371 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41087': TLS handshake failed with remote 'tls://127.0.0.1:55678': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:13,371 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:08,355 - distributed.scheduler - INFO - State start +2025-03-12 04:20:08,363 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35245 +2025-03-12 04:20:08,363 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36665/status +2025-03-12 04:20:08,363 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:08,380 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36461'. Reason: failure-to-start- +2025-03-12 04:20:08,381 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36461' closed. +2025-03-12 04:20:08,381 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45771'. Reason: failure-to-start- +2025-03-12 04:20:08,381 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45771' closed. 
+2025-03-12 04:20:08,386 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35245': TLS handshake failed with remote 'tls://127.0.0.1:34812': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:08,386 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35245': TLS handshake failed with remote 'tls://127.0.0.1:34814': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:08,386 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10254,17 +11817,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:14,380 - distributed.scheduler - INFO - State start -2026-04-13 07:27:14,386 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42359 -2026-04-13 07:27:14,396 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34949/status -2026-04-13 07:27:14,398 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:14,428 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33739'. Reason: failure-to-start- -2026-04-13 07:27:14,428 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33739' closed. -2026-04-13 07:27:14,428 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46023'. Reason: failure-to-start- -2026-04-13 07:27:14,429 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46023' closed. -2026-04-13 07:27:14,429 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42359': TLS handshake failed with remote 'tls://127.0.0.1:38510': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:14,429 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42359': TLS handshake failed with remote 'tls://127.0.0.1:38520': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:14,429 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:09,391 - distributed.scheduler - INFO - State start +2025-03-12 04:20:09,402 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36067 +2025-03-12 04:20:09,403 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43595/status +2025-03-12 04:20:09,403 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:09,432 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43015'. Reason: failure-to-start- +2025-03-12 04:20:09,433 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43015' closed. +2025-03-12 04:20:09,433 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41169'. Reason: failure-to-start- +2025-03-12 04:20:09,433 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41169' closed. 
+2025-03-12 04:20:09,433 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36067': TLS handshake failed with remote 'tls://127.0.0.1:37096': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:09,446 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36067': TLS handshake failed with remote 'tls://127.0.0.1:37110': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:09,446 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10333,17 +11896,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:15,436 - distributed.scheduler - INFO - State start -2026-04-13 07:27:15,442 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43935 -2026-04-13 07:27:15,452 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43821/status -2026-04-13 07:27:15,454 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:15,472 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37435'. Reason: failure-to-start- -2026-04-13 07:27:15,472 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37435' closed. -2026-04-13 07:27:15,472 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33779'. Reason: failure-to-start- -2026-04-13 07:27:15,472 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33779' closed. -2026-04-13 07:27:15,473 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43935': TLS handshake failed with remote 'tls://127.0.0.1:46356': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:15,473 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43935': TLS handshake failed with remote 'tls://127.0.0.1:46364': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:15,473 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:10,451 - distributed.scheduler - INFO - State start +2025-03-12 04:20:10,459 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39871 +2025-03-12 04:20:10,459 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38481/status +2025-03-12 04:20:10,459 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:10,471 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38587'. Reason: failure-to-start- +2025-03-12 04:20:10,472 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38587' closed. +2025-03-12 04:20:10,472 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37853'. Reason: failure-to-start- +2025-03-12 04:20:10,472 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37853' closed. 
+2025-03-12 04:20:10,472 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39871': TLS handshake failed with remote 'tls://127.0.0.1:42134': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:10,472 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39871': TLS handshake failed with remote 'tls://127.0.0.1:42146': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:10,472 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10412,17 +11975,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:16,488 - distributed.scheduler - INFO - State start -2026-04-13 07:27:16,494 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44901 -2026-04-13 07:27:16,504 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41965/status -2026-04-13 07:27:16,506 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:16,535 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36483'. Reason: failure-to-start- -2026-04-13 07:27:16,536 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36483' closed. -2026-04-13 07:27:16,536 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37473'. Reason: failure-to-start- -2026-04-13 07:27:16,536 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37473' closed. -2026-04-13 07:27:16,537 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44901': TLS handshake failed with remote 'tls://127.0.0.1:43666': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:16,537 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44901': TLS handshake failed with remote 'tls://127.0.0.1:43668': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:16,537 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:11,486 - distributed.scheduler - INFO - State start +2025-03-12 04:20:11,499 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35467 +2025-03-12 04:20:11,500 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39193/status +2025-03-12 04:20:11,500 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:11,525 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46527'. Reason: failure-to-start- +2025-03-12 04:20:11,525 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46527' closed. +2025-03-12 04:20:11,525 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44877'. Reason: failure-to-start- +2025-03-12 04:20:11,525 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44877' closed. 
+2025-03-12 04:20:11,535 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35467': TLS handshake failed with remote 'tls://127.0.0.1:40068': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:11,535 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35467': TLS handshake failed with remote 'tls://127.0.0.1:40076': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:11,535 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10491,17 +12054,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:17,544 - distributed.scheduler - INFO - State start -2026-04-13 07:27:17,558 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35189 -2026-04-13 07:27:17,560 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44711/status -2026-04-13 07:27:17,562 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:17,581 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37603'. Reason: failure-to-start- -2026-04-13 07:27:17,582 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37603' closed. -2026-04-13 07:27:17,582 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33237'. Reason: failure-to-start- -2026-04-13 07:27:17,582 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33237' closed. -2026-04-13 07:27:17,591 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35189': TLS handshake failed with remote 'tls://127.0.0.1:38618': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:17,591 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35189': TLS handshake failed with remote 'tls://127.0.0.1:38628': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:17,591 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:12,543 - distributed.scheduler - INFO - State start +2025-03-12 04:20:12,554 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45021 +2025-03-12 04:20:12,555 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41239/status +2025-03-12 04:20:12,555 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:12,583 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43145'. Reason: failure-to-start- +2025-03-12 04:20:12,583 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43145' closed. +2025-03-12 04:20:12,583 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34905'. Reason: failure-to-start- +2025-03-12 04:20:12,583 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34905' closed. 
+2025-03-12 04:20:12,584 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45021': TLS handshake failed with remote 'tls://127.0.0.1:55738': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:12,584 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45021': TLS handshake failed with remote 'tls://127.0.0.1:55748': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:12,584 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10570,17 +12133,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:18,600 - distributed.scheduler - INFO - State start -2026-04-13 07:27:18,606 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35429 -2026-04-13 07:27:18,616 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42407/status -2026-04-13 07:27:18,618 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:18,648 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44571'. Reason: failure-to-start- -2026-04-13 07:27:18,649 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44571' closed. -2026-04-13 07:27:18,649 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38783'. Reason: failure-to-start- -2026-04-13 07:27:18,649 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38783' closed. -2026-04-13 07:27:18,651 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35429': TLS handshake failed with remote 'tls://127.0.0.1:54270': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:18,651 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35429': TLS handshake failed with remote 'tls://127.0.0.1:54286': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:18,651 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:13,599 - distributed.scheduler - INFO - State start +2025-03-12 04:20:13,610 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34793 +2025-03-12 04:20:13,611 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43865/status +2025-03-12 04:20:13,611 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:13,682 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41605'. Reason: failure-to-start- +2025-03-12 04:20:13,682 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41605' closed. +2025-03-12 04:20:13,682 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36891'. Reason: failure-to-start- +2025-03-12 04:20:13,682 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36891' closed. 
+2025-03-12 04:20:13,684 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34793': TLS handshake failed with remote 'tls://127.0.0.1:36618': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:13,684 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34793': TLS handshake failed with remote 'tls://127.0.0.1:36624': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:13,684 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10649,17 +12212,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:19,656 - distributed.scheduler - INFO - State start -2026-04-13 07:27:19,662 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39701 -2026-04-13 07:27:19,672 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35573/status -2026-04-13 07:27:19,674 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:19,697 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36595'. Reason: failure-to-start- -2026-04-13 07:27:19,698 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36595' closed. -2026-04-13 07:27:19,698 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44387'. Reason: failure-to-start- -2026-04-13 07:27:19,698 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44387' closed. -2026-04-13 07:27:19,698 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39701': TLS handshake failed with remote 'tls://127.0.0.1:34716': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:19,698 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39701': TLS handshake failed with remote 'tls://127.0.0.1:34724': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:19,699 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:14,699 - distributed.scheduler - INFO - State start +2025-03-12 04:20:14,706 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43775 +2025-03-12 04:20:14,707 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37795/status +2025-03-12 04:20:14,708 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:14,728 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43483'. Reason: failure-to-start- +2025-03-12 04:20:14,728 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43483' closed. +2025-03-12 04:20:14,728 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42827'. Reason: failure-to-start- +2025-03-12 04:20:14,728 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42827' closed. 
+2025-03-12 04:20:14,729 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43775': TLS handshake failed with remote 'tls://127.0.0.1:50596': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:14,729 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43775': TLS handshake failed with remote 'tls://127.0.0.1:50604': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:14,729 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10728,17 +12291,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:20,704 - distributed.scheduler - INFO - State start -2026-04-13 07:27:20,710 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46817 -2026-04-13 07:27:20,720 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42231/status -2026-04-13 07:27:20,722 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:20,751 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39941'. Reason: failure-to-start- -2026-04-13 07:27:20,752 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39941' closed. -2026-04-13 07:27:20,752 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40831'. Reason: failure-to-start- -2026-04-13 07:27:20,752 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40831' closed. -2026-04-13 07:27:20,752 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46817': TLS handshake failed with remote 'tls://127.0.0.1:45420': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:20,752 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46817': TLS handshake failed with remote 'tls://127.0.0.1:45426': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:20,753 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:15,747 - distributed.scheduler - INFO - State start +2025-03-12 04:20:15,750 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45175 +2025-03-12 04:20:15,751 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35207/status +2025-03-12 04:20:15,751 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:15,774 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41301'. Reason: failure-to-start- +2025-03-12 04:20:15,774 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41301' closed. +2025-03-12 04:20:15,774 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36863'. Reason: failure-to-start- +2025-03-12 04:20:15,774 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36863' closed. 
+2025-03-12 04:20:15,775 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45175': TLS handshake failed with remote 'tls://127.0.0.1:40310': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:15,775 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45175': TLS handshake failed with remote 'tls://127.0.0.1:40326': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:15,775 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10807,17 +12370,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:21,757 - distributed.scheduler - INFO - State start -2026-04-13 07:27:21,771 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43057 -2026-04-13 07:27:21,773 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36583/status -2026-04-13 07:27:21,775 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:21,793 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39051'. Reason: failure-to-start- -2026-04-13 07:27:21,794 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39051' closed. -2026-04-13 07:27:21,794 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38537'. Reason: failure-to-start- -2026-04-13 07:27:21,794 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38537' closed. -2026-04-13 07:27:21,803 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43057': TLS handshake failed with remote 'tls://127.0.0.1:42554': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:21,803 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43057': TLS handshake failed with remote 'tls://127.0.0.1:42558': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:21,803 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:16,783 - distributed.scheduler - INFO - State start +2025-03-12 04:20:16,786 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45379 +2025-03-12 04:20:16,787 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38607/status +2025-03-12 04:20:16,787 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:16,820 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35919'. Reason: failure-to-start- +2025-03-12 04:20:16,820 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35919' closed. +2025-03-12 04:20:16,820 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34921'. Reason: failure-to-start- +2025-03-12 04:20:16,820 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34921' closed. 
+2025-03-12 04:20:16,821 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45379': TLS handshake failed with remote 'tls://127.0.0.1:60900': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:16,821 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45379': TLS handshake failed with remote 'tls://127.0.0.1:60904': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:16,830 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10886,17 +12449,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:22,809 - distributed.scheduler - INFO - State start -2026-04-13 07:27:22,823 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38333 -2026-04-13 07:27:22,825 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40261/status -2026-04-13 07:27:22,827 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:22,856 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46119'. Reason: failure-to-start- -2026-04-13 07:27:22,856 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46119' closed. -2026-04-13 07:27:22,856 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42511'. Reason: failure-to-start- -2026-04-13 07:27:22,856 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42511' closed. -2026-04-13 07:27:22,857 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38333': TLS handshake failed with remote 'tls://127.0.0.1:43186': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:22,857 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38333': TLS handshake failed with remote 'tls://127.0.0.1:43200': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:22,857 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:17,835 - distributed.scheduler - INFO - State start +2025-03-12 04:20:17,846 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34475 +2025-03-12 04:20:17,847 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38569/status +2025-03-12 04:20:17,847 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:17,881 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33061'. Reason: failure-to-start- +2025-03-12 04:20:17,881 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33061' closed. +2025-03-12 04:20:17,881 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33631'. Reason: failure-to-start- +2025-03-12 04:20:17,881 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33631' closed. 
+2025-03-12 04:20:17,890 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34475': TLS handshake failed with remote 'tls://127.0.0.1:33834': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:17,890 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34475': TLS handshake failed with remote 'tls://127.0.0.1:33846': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:17,890 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -10965,17 +12528,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:23,863 - distributed.scheduler - INFO - State start -2026-04-13 07:27:23,868 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40923 -2026-04-13 07:27:23,870 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44243/status -2026-04-13 07:27:23,873 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:23,885 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34023'. Reason: failure-to-start- -2026-04-13 07:27:23,885 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34023' closed. -2026-04-13 07:27:23,885 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39127'. Reason: failure-to-start- -2026-04-13 07:27:23,885 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39127' closed. -2026-04-13 07:27:23,886 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40923': TLS handshake failed with remote 'tls://127.0.0.1:47504': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:23,886 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40923': TLS handshake failed with remote 'tls://127.0.0.1:47508': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:23,886 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:18,895 - distributed.scheduler - INFO - State start +2025-03-12 04:20:18,906 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40323 +2025-03-12 04:20:18,907 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45643/status +2025-03-12 04:20:18,907 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:18,937 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35343'. Reason: failure-to-start- +2025-03-12 04:20:18,937 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35343' closed. +2025-03-12 04:20:18,937 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34125'. Reason: failure-to-start- +2025-03-12 04:20:18,950 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34125' closed. 
+2025-03-12 04:20:18,950 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40323': TLS handshake failed with remote 'tls://127.0.0.1:60834': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:18,950 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40323': TLS handshake failed with remote 'tls://127.0.0.1:60848': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:18,950 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -11044,17 +12607,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:24,892 - distributed.scheduler - INFO - State start -2026-04-13 07:27:24,902 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38909 -2026-04-13 07:27:24,908 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40785/status -2026-04-13 07:27:24,910 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:24,941 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38165'. Reason: failure-to-start- -2026-04-13 07:27:24,942 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38165' closed. -2026-04-13 07:27:24,942 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38415'. Reason: failure-to-start- -2026-04-13 07:27:24,942 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38415' closed. -2026-04-13 07:27:24,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38909': TLS handshake failed with remote 'tls://127.0.0.1:46302': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:24,947 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38909': TLS handshake failed with remote 'tls://127.0.0.1:46308': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:24,947 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:19,955 - distributed.scheduler - INFO - State start +2025-03-12 04:20:19,966 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39227 +2025-03-12 04:20:19,967 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34067/status +2025-03-12 04:20:19,967 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:19,997 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39525'. Reason: failure-to-start- +2025-03-12 04:20:19,997 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39525' closed. +2025-03-12 04:20:20,010 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44379'. Reason: failure-to-start- +2025-03-12 04:20:20,010 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44379' closed. 
+2025-03-12 04:20:20,010 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39227': TLS handshake failed with remote 'tls://127.0.0.1:35858': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:20,011 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39227': TLS handshake failed with remote 'tls://127.0.0.1:35874': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:20,011 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -11123,17 +12686,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:25,952 - distributed.scheduler - INFO - State start -2026-04-13 07:27:25,962 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:32947 -2026-04-13 07:27:25,968 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35809/status -2026-04-13 07:27:25,974 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:26,000 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45533'. Reason: failure-to-start- -2026-04-13 07:27:26,000 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45533' closed. -2026-04-13 07:27:26,000 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37417'. Reason: failure-to-start- -2026-04-13 07:27:26,000 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37417' closed. -2026-04-13 07:27:26,001 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32947': TLS handshake failed with remote 'tls://127.0.0.1:47838': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:26,001 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:32947': TLS handshake failed with remote 'tls://127.0.0.1:47850': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:26,001 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:21,015 - distributed.scheduler - INFO - State start +2025-03-12 04:20:21,026 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46379 +2025-03-12 04:20:21,027 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34909/status +2025-03-12 04:20:21,027 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:21,060 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39159'. Reason: failure-to-start- +2025-03-12 04:20:21,060 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39159' closed. +2025-03-12 04:20:21,060 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40819'. Reason: failure-to-start- +2025-03-12 04:20:21,060 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40819' closed. 
+2025-03-12 04:20:21,070 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46379': TLS handshake failed with remote 'tls://127.0.0.1:43846': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:21,070 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46379': TLS handshake failed with remote 'tls://127.0.0.1:43848': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:21,070 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -11202,17 +12765,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:27,012 - distributed.scheduler - INFO - State start -2026-04-13 07:27:27,018 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35029 -2026-04-13 07:27:27,028 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42883/status -2026-04-13 07:27:27,031 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:27,062 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33927'. Reason: failure-to-start- -2026-04-13 07:27:27,063 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33927' closed. -2026-04-13 07:27:27,063 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37555'. Reason: failure-to-start- -2026-04-13 07:27:27,063 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37555' closed. -2026-04-13 07:27:27,063 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35029': TLS handshake failed with remote 'tls://127.0.0.1:35032': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:27,063 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35029': TLS handshake failed with remote 'tls://127.0.0.1:35038': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:27,064 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:22,075 - distributed.scheduler - INFO - State start +2025-03-12 04:20:22,086 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36667 +2025-03-12 04:20:22,087 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43131/status +2025-03-12 04:20:22,087 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:22,117 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34199'. Reason: failure-to-start- +2025-03-12 04:20:22,130 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34199' closed. +2025-03-12 04:20:22,130 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39045'. Reason: failure-to-start- +2025-03-12 04:20:22,130 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39045' closed. 
+2025-03-12 04:20:22,132 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36667': TLS handshake failed with remote 'tls://127.0.0.1:58918': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:22,132 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36667': TLS handshake failed with remote 'tls://127.0.0.1:58926': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:22,132 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -11281,17 +12844,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:28,069 - distributed.scheduler - INFO - State start -2026-04-13 07:27:28,083 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:36995 -2026-04-13 07:27:28,085 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34771/status -2026-04-13 07:27:28,087 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:28,115 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35683'. Reason: failure-to-start- -2026-04-13 07:27:28,115 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35683' closed. -2026-04-13 07:27:28,115 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34583'. Reason: failure-to-start- -2026-04-13 07:27:28,115 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34583' closed. -2026-04-13 07:27:28,116 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36995': TLS handshake failed with remote 'tls://127.0.0.1:49084': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:28,116 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:36995': TLS handshake failed with remote 'tls://127.0.0.1:49098': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:28,116 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:23,144 - distributed.scheduler - INFO - State start +2025-03-12 04:20:23,156 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34757 +2025-03-12 04:20:23,156 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38593/status +2025-03-12 04:20:23,156 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:23,188 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41559'. Reason: failure-to-start- +2025-03-12 04:20:23,189 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41559' closed. +2025-03-12 04:20:23,189 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43397'. Reason: failure-to-start- +2025-03-12 04:20:23,189 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43397' closed. 
+2025-03-12 04:20:23,197 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34757': TLS handshake failed with remote 'tls://127.0.0.1:45854': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:23,198 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34757': TLS handshake failed with remote 'tls://127.0.0.1:45864': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:23,198 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -11360,17 +12923,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:27:29,120 - distributed.scheduler - INFO - State start -2026-04-13 07:27:29,134 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41121 -2026-04-13 07:27:29,136 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35177/status -2026-04-13 07:27:29,138 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:27:29,171 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45811'. Reason: failure-to-start- -2026-04-13 07:27:29,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45811' closed. -2026-04-13 07:27:29,171 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32939'. Reason: failure-to-start- -2026-04-13 07:27:29,171 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32939' closed. -2026-04-13 07:27:29,172 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41121': TLS handshake failed with remote 'tls://127.0.0.1:50044': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:29,172 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41121': TLS handshake failed with remote 'tls://127.0.0.1:50050': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:27:29,172 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:20:24,204 - distributed.scheduler - INFO - State start +2025-03-12 04:20:24,216 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34531 +2025-03-12 04:20:24,216 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41327/status +2025-03-12 04:20:24,216 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:24,272 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41171'. Reason: failure-to-start- +2025-03-12 04:20:24,272 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41171' closed. +2025-03-12 04:20:24,272 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36679'. Reason: failure-to-start- +2025-03-12 04:20:24,272 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36679' closed. 
+2025-03-12 04:20:24,284 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34531': TLS handshake failed with remote 'tls://127.0.0.1:45028': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:24,284 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34531': TLS handshake failed with remote 'tls://127.0.0.1:45038': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:20:24,284 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -11440,246 +13003,31 @@ ) from exc TimeoutError: Nanny start timed out after 0s. ============================= slowest 20 durations ============================= -60.01s call distributed/tests/test_tls_functional.py::test_retire_workers 60.00s call distributed/tests/test_tls_functional.py::test_nanny -12.60s call distributed/tests/test_gc.py::test_gc_diagnosis_cpu_time -10.02s call distributed/tests/test_utils_test.py::test_popen_timeout -8.40s call distributed/tests/test_stress.py::test_cancel_stress -6.94s call distributed/shuffle/tests/test_rechunk.py::test_rechunk_method -6.83s call distributed/tests/test_stress.py::test_cancel_stress_sync -6.82s call distributed/tests/test_nanny.py::test_num_fds -6.33s call distributed/tests/test_failed_workers.py::test_worker_doesnt_await_task_completion -5.25s call distributed/tests/test_steal.py::test_balance_with_longer_task -5.17s call distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x3-chunks3] -5.10s call distributed/cli/tests/test_tls_cli.py::test_nanny -5.04s call distributed/tests/test_chaos.py::test_KillWorker[sys.exit] -4.94s call distributed/diagnostics/tests/test_progress.py::test_group_timing -4.82s call distributed/shuffle/tests/test_rechunk.py::test_homogeneously_schedule_unpack -4.66s call distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x5-chunks5] -4.60s call distributed/cli/tests/test_tls_cli.py::test_basic -4.50s call distributed/shuffle/tests/test_rechunk.py::test_rechunk_with_partially_unknown_dimension[x4-chunks4] -4.44s call distributed/cli/tests/test_dask_scheduler.py::test_interface -4.36s call distributed/deploy/tests/test_cluster.py::test_cluster_wait_for_worker +2.31s call distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures +1.66s call distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp +0.69s call distributed/tests/test_worker.py::test_log_remove_worker +0.63s call distributed/tests/test_worker_memory.py::test_pause_while_spilling +0.52s call distributed/tests/test_worker_memory.py::test_pause_while_idle +0.51s call distributed/tests/test_worker_memory.py::test_pause_while_saturated +0.32s call distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill +0.00s setup distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp +0.00s teardown distributed/tests/test_tls_functional.py::test_nanny +0.00s setup distributed/tests/test_worker.py::test_log_remove_worker +0.00s teardown distributed/tests/test_client.py::test_computation_object_code_client_submit_list_comp +0.00s teardown 
distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures +0.00s teardown distributed/tests/test_worker_memory.py::test_pause_while_idle +0.00s teardown distributed/tests/test_worker_memory.py::test_fail_to_pickle_spill +0.00s setup distributed/tests/test_client.py::test_client_disconnect_exception_on_cancelled_futures +0.00s teardown distributed/tests/test_worker_memory.py::test_pause_while_saturated +0.00s setup distributed/tests/test_tls_functional.py::test_nanny +0.00s teardown distributed/tests/test_worker.py::test_log_remove_worker +0.00s teardown distributed/tests/test_worker_memory.py::test_pause_while_spilling =========================== short test summary info ============================ -SKIPPED [1] distributed/cli/tests/test_dask_ssh.py:9: could not import 'paramiko': No module named 'paramiko' -SKIPPED [1] distributed/comm/tests/test_ucx.py:15: could not import 'ucp': No module named 'ucp' -SKIPPED [1] distributed/comm/tests/test_ucx_config.py:23: could not import 'ucp': No module named 'ucp' -SKIPPED [1] distributed/dashboard/tests/test_components.py:5: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/dashboard/tests/test_scheduler_bokeh.py:11: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/dashboard/tests/test_worker_bokeh.py:10: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/deploy/tests/test_old_ssh.py:7: could not import 'paramiko': No module named 'paramiko' -SKIPPED [1] distributed/deploy/tests/test_ssh.py:5: could not import 'asyncssh': No module named 'asyncssh' -SKIPPED [1] distributed/diagnostics/tests/test_cudf_diagnostics.py:20: could not import 'cudf': No module named 'cudf' -SKIPPED [1] distributed/diagnostics/tests/test_memray.py:5: could not import 'memray': No module named 'memray' -SKIPPED [1] distributed/diagnostics/tests/test_nvml.py:11: could not import 'pynvml': No module named 'pynvml' -SKIPPED [1] distributed/diagnostics/tests/test_progress_stream.py:5: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/diagnostics/tests/test_progress_widgets.py:14: could not import 'ipywidgets': No module named 'ipywidgets' -SKIPPED [1] distributed/diagnostics/tests/test_rmm_diagnostics.py:14: could not import 'dask_cuda': No module named 'dask_cuda' -SKIPPED [1] distributed/protocol/tests/test_arrow.py:5: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED [1] distributed/protocol/tests/test_cupy.py:11: could not import 'cupy': No module named 'cupy' -SKIPPED [1] distributed/protocol/tests/test_h5py.py:8: could not import 'h5py': No module named 'h5py' -SKIPPED [1] distributed/protocol/tests/test_keras.py:5: could not import 'keras': No module named 'keras' -SKIPPED [1] distributed/protocol/tests/test_netcdf4.py:5: could not import 'netCDF4': No module named 'netCDF4' -SKIPPED [1] distributed/protocol/tests/test_numba.py:11: could not import 'numba.cuda': No module named 'numba' -SKIPPED [1] distributed/protocol/tests/test_rmm.py:10: could not import 'numba.cuda': No module named 'numba' -SKIPPED [1] distributed/protocol/tests/test_scipy.py:8: could not import 'scipy': No module named 'scipy' -SKIPPED [1] distributed/protocol/tests/test_sparse.py:6: could not import 'sparse': No module named 'sparse' -SKIPPED [1] distributed/protocol/tests/test_torch.py:8: could not import 'torch': No module named 'torch' -SKIPPED [1] distributed/shuffle/tests/test_graph.py:9: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED 
[1] distributed/shuffle/tests/test_merge.py:71: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED [1] distributed/shuffle/tests/test_merge_column_and_index.py:110: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED [1] distributed/shuffle/tests/test_metrics.py:71: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED [1] distributed/shuffle/tests/test_shuffle.py:195: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED [1] distributed/tests/test_parse_stdout.py:14: could not import 'parse_stdout': No module named 'parse_stdout' -SKIPPED [1] distributed/cli/tests/test_dask_scheduler.py:552: need --runslow option to run -SKIPPED [1] distributed/cli/tests/test_dask_scheduler.py:564: need --runslow option to run -SKIPPED [2] distributed/cli/tests/test_dask_scheduler.py:662: need --runslow option to run -SKIPPED [2] distributed/cli/tests/test_dask_worker.py:616: need --runslow option to run -SKIPPED [1] distributed/deploy/tests/test_subprocess.py:77: need --runslow option to run -SKIPPED [1] distributed/deploy/tests/test_subprocess.py:92: Windows-specific error testing (distributed#7434) -SKIPPED [1] distributed/diagnostics/tests/test_eventstream.py:16: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/diagnostics/tests/test_install_plugin.py:143: need --runslow option to run -SKIPPED [1] distributed/diagnostics/tests/test_install_plugin.py:151: need --runslow option to run -SKIPPED [1] distributed/diagnostics/tests/test_install_plugin.py:158: need --runslow option to run -SKIPPED [2] distributed/diagnostics/tests/test_memory_sampler.py:103: could not import 'matplotlib': No module named 'matplotlib' -SKIPPED [1] distributed/diagnostics/tests/test_task_stream.py:140: could not import 'bokeh.models': No module named 'bokeh' -SKIPPED [1] distributed/diagnostics/tests/test_task_stream.py:151: could not import 'bokeh.models': No module named 'bokeh' -SKIPPED [1] distributed/http/scheduler/tests/test_missing_bokeh.py:35: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/http/scheduler/tests/test_missing_bokeh.py:59: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:87: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:535: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:648: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:689: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:709: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:732: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:754: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/scheduler/tests/test_scheduler_http.py:775: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/worker/tests/test_worker_http.py:150: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/worker/tests/test_worker_http.py:163: could not import 'aiohttp': No module named 'aiohttp' -SKIPPED [1] distributed/http/worker/tests/test_worker_http.py:234: Fails on 32-bit, seems to be large memory 
request -SKIPPED [4] distributed/protocol/tests/test_collection_cuda.py:12: could not import 'cupy': No module named 'cupy' -SKIPPED [4] distributed/protocol/tests/test_collection_cuda.py:44: could not import 'cudf': No module named 'cudf' -SKIPPED [1] distributed/protocol/tests/test_compression.py:125: could not import 'lz4': No module named 'lz4' -SKIPPED [2] distributed/protocol/tests/test_compression.py:132: could not import 'lz4': No module named 'lz4' -SKIPPED [2] distributed/protocol/tests/test_compression.py:132: could not import 'snappy': No module named 'snappy' -SKIPPED [2] distributed/protocol/tests/test_compression.py:132: could not import 'zstandard': No module named 'zstandard' -SKIPPED [1] distributed/protocol/tests/test_compression.py:143: could not import 'lz4': No module named 'lz4' -SKIPPED [1] distributed/protocol/tests/test_compression.py:143: could not import 'snappy': No module named 'snappy' -SKIPPED [1] distributed/protocol/tests/test_compression.py:143: could not import 'zstandard': No module named 'zstandard' -SKIPPED [1] distributed/protocol/tests/test_compression.py:151: could not import 'lz4': No module named 'lz4' -SKIPPED [1] distributed/protocol/tests/test_compression.py:151: could not import 'snappy': No module named 'snappy' -SKIPPED [1] distributed/protocol/tests/test_compression.py:151: could not import 'zstandard': No module named 'zstandard' -SKIPPED [10] distributed/protocol/tests/test_compression.py:168: need --runslow option to run -SKIPPED [5] distributed/protocol/tests/test_compression.py:188: need --runslow option to run -SKIPPED [1] distributed/protocol/tests/test_numpy.py:150: could not import 'numpy.core.test_rational': No module named 'numpy.core.test_rational' -SKIPPED [1] distributed/protocol/tests/test_numpy.py:180: need --runslow option to run -SKIPPED [1] distributed/protocol/tests/test_pickle.py:275: need --runslow option to run -SKIPPED [3] distributed/protocol/tests/test_protocol.py:219: need --runslow option to run -SKIPPED [1] distributed/shuffle/tests/test_rechunk.py:1326: need --runslow option to run -SKIPPED [1] distributed/tests/test_active_memory_manager.py:1304: need --runslow option to run -SKIPPED [1] distributed/tests/test_active_memory_manager.py:1317: need --runslow option to run -SKIPPED [1] distributed/tests/test_active_memory_manager.py:1339: need --runslow option to run -SKIPPED [2] distributed/tests/test_active_memory_manager.py:1360: need --runslow option to run -SKIPPED [1] distributed/tests/test_batched.py:158: need --runslow option to run -SKIPPED [1] distributed/tests/test_batched.py:228: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:859: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:1707: Upstream issue in CPython. See https://github.com/dask/distributed/issues/8708 and https://github.com/python/cpython/issues/121342. 
-SKIPPED [1] distributed/tests/test_client.py:1736: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:1870: unconditional skip -SKIPPED [1] distributed/tests/test_client.py:2088: unconditional skip -SKIPPED [1] distributed/tests/test_client.py:2677: Use fast random selection now -SKIPPED [1] distributed/tests/test_client.py:3301: unconditional skip -SKIPPED [1] distributed/tests/test_client.py:3585: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:3650: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:3762: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:4563: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:4667: Now prefer first-in-first-out -SKIPPED [1] distributed/tests/test_client.py:4697: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:4804: need --runslow option to run -SKIPPED [2] distributed/tests/test_client.py:4849: need --runslow option to run -SKIPPED [2] distributed/tests/test_client.py:4869: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:4896: need --runslow option to run -SKIPPED [2] distributed/tests/test_client.py:4909: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:5097: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:5118: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:5417: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:5643: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:5844: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:5961: unconditional skip -SKIPPED [1] distributed/tests/test_client.py:6179: could not import 'bokeh.plotting': No module named 'bokeh' -SKIPPED [1] distributed/tests/test_client.py:6540: known intermittent failure -SKIPPED [2] distributed/tests/test_client.py:6621: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:6695: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:7065: could not import 'bokeh': No module named 'bokeh' -SKIPPED [2] distributed/tests/test_client.py:7166: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:7265: numpy >=1.25 can capture ufunc code -SKIPPED [1] distributed/tests/test_client.py:7413: need --runslow option to run -SKIPPED [2] distributed/tests/test_client.py:8031: need --runslow option to run -SKIPPED [1] distributed/tests/test_client.py:8235: need --runslow option to run -SKIPPED [2] distributed/tests/test_client.py:8286: need --runslow option to run -SKIPPED [1] distributed/tests/test_client_executor.py:146: need --runslow option to run -SKIPPED [1] distributed/tests/test_config.py:356: could not import 'uvloop': No module named 'uvloop' -SKIPPED [1] distributed/tests/test_core.py:450: need --runslow option to run -SKIPPED [1] distributed/tests/test_core.py:915: could not import 'crick': No module named 'crick' -SKIPPED [1] distributed/tests/test_core.py:924: could not import 'crick': No module named 'crick' -SKIPPED [1] distributed/tests/test_core.py:1386: need --runslow option to run -SKIPPED [1] distributed/tests/test_counter.py:13: no crick library -SKIPPED [1] distributed/tests/test_dask_collections.py:221: could not import 'sparse': No module named 'sparse' -SKIPPED [1] distributed/tests/test_diskutils.py:224: need --runslow option to run -SKIPPED [1] 
distributed/tests/test_failed_workers.py:41: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:117: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:128: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:287: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:358: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:442: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:454: need --runslow option to run -SKIPPED [2] distributed/tests/test_failed_workers.py:491: need --runslow option to run -SKIPPED [1] distributed/tests/test_failed_workers.py:537: need --runslow option to run -SKIPPED [1] distributed/tests/test_jupyter.py:48: need --runslow option to run -SKIPPED [1] distributed/tests/test_metrics.py:30: WindowsTime doesn't work with high accuracy base timer -SKIPPED [1] distributed/tests/test_nanny.py:93: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:107: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:141: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:291: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:490: need --runslow option to run -SKIPPED [2] distributed/tests/test_nanny.py:567: could not import 'ucp': No module named 'ucp' -SKIPPED [1] distributed/tests/test_nanny.py:682: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:690: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:731: need --runslow option to run -SKIPPED [1] distributed/tests/test_nanny.py:774: need --runslow option to run -SKIPPED [1] distributed/tests/test_profile.py:75: could not import 'stacktrace': No module named 'stacktrace' -SKIPPED [1] distributed/tests/test_queues.py:114: need --runslow option to run -SKIPPED [1] distributed/tests/test_resources.py:363: Skipped -SKIPPED [1] distributed/tests/test_resources.py:422: Should protect resource keys from optimization -SKIPPED [1] distributed/tests/test_resources.py:445: atop fusion seemed to break this -SKIPPED [1] distributed/tests/test_scheduler.py:286: Not relevant with queuing on; see https://github.com/dask/distributed/issues/7204 -SKIPPED [1] distributed/tests/test_scheduler.py:421: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1075: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1103: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1124: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1216: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1234: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1625: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1680: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1697: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1860: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:1900: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:2095: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:2356: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:2481: need --runslow option to run -SKIPPED [1] 
distributed/tests/test_scheduler.py:2508: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:2556: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:2606: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:2661: could not import 'bokeh': No module named 'bokeh' -SKIPPED [1] distributed/tests/test_scheduler.py:3112: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:3512: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:3691: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:3723: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:3866: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:3923: need --runslow option to run -SKIPPED [1] distributed/tests/test_scheduler.py:3943: need --runslow option to run -SKIPPED [1] distributed/tests/test_steal.py:106: need --runslow option to run -SKIPPED [1] distributed/tests/test_steal.py:285: Skipped -SKIPPED [1] distributed/tests/test_steal.py:1287: executing heartbeats not considered yet -SKIPPED [1] distributed/tests/test_steal.py:1353: need --runslow option to run -SKIPPED [2] distributed/tests/test_stress.py:51: need --runslow option to run -SKIPPED [1] distributed/tests/test_stress.py:100: need --runslow option to run -SKIPPED [1] distributed/tests/test_stress.py:209: unconditional skip -SKIPPED [1] distributed/tests/test_stress.py:240: need --runslow option to run -SKIPPED [1] distributed/tests/test_stress.py:269: need --runslow option to run -SKIPPED [1] distributed/tests/test_stress.py:310: need --runslow option to run -SKIPPED [1] distributed/tests/test_system_monitor.py:111: could not import 'gilknocker': No module named 'gilknocker' -SKIPPED [1] distributed/tests/test_utils.py:166: could not import 'IPython': No module named 'IPython' -SKIPPED [1] distributed/tests/test_utils.py:347: could not import 'pyarrow': No module named 'pyarrow' -SKIPPED [1] distributed/tests/test_utils_test.py:146: This hangs on travis -SKIPPED [1] distributed/tests/test_utils_test.py:721: need --runslow option to run -SKIPPED [1] distributed/tests/test_variable.py:205: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:219: don't yet support uploading pyc files -SKIPPED [1] distributed/tests/test_worker.py:309: could not import 'crick': No module named 'crick' -SKIPPED [1] distributed/tests/test_worker.py:345: need --runslow option to run -SKIPPED [8] distributed/tests/test_worker.py:420: need --runslow option to run -SKIPPED [5] distributed/tests/test_worker.py:562: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1180: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1264: need --runslow option to run -SKIPPED [2] distributed/tests/test_worker.py:1453: could not import 'ucp': No module named 'ucp' -SKIPPED [1] distributed/tests/test_worker.py:1517: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1549: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1580: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1607: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1635: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1702: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:1806: need --runslow option to run -SKIPPED [1] 
distributed/tests/test_worker.py:2705: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:3399: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker.py:3429: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker_memory.py:871: need --runslow option to run -SKIPPED [2] distributed/tests/test_worker_memory.py:883: need --runslow option to run -SKIPPED [1] distributed/tests/test_worker_memory.py:997: need --runslow option to run FAILED distributed/tests/test_tls_functional.py::test_nanny - TimeoutError -FAILED distributed/tests/test_tls_functional.py::test_retire_workers - TimeoutError -= 2 failed, 2910 passed, 265 skipped, 222 deselected, 15 xfailed, 8 xpassed in 1414.91s (0:23:34) = -*** END OF RUN 1: NOT ALL TESTS HAVE YET PASSED/XFAILED *** -*** STARTING RUN 2: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file 
--deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers --deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers 
--deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error --deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners 
--deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send --deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory 
--deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources --deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task +============= 1 failed, 7 passed, 4 deselected in 69.98s (0:01:09) ============= +*** END OF RUN 2: NOT ALL TESTS HAVE YET PASSED/XFAILED *** +*** STARTING RUN 3: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout 
--deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers --deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided 
--deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error --deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc 
--deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send --deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports 
--deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources --deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple 
--deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task ============================= test session starts ============================== platform linux -- Python 3.13.2, pytest-8.3.4, pluggy-1.5.0 -- /usr/bin/python3.13 cachedir: .pytest_cache @@ -11689,16 +13037,15 @@ timeout: 300.0s timeout method: signal timeout func_only: False -collecting ... collected 2 items -run-last-failure: rerun previous 2 failures (skipped 148 files) +collecting ... collected 1 item +run-last-failure: rerun previous 1 failure (skipped 148 files) -distributed/tests/test_tls_functional.py::test_nanny PASSED [ 50%] -distributed/tests/test_tls_functional.py::test_retire_workers FAILED [100%] +distributed/tests/test_tls_functional.py::test_nanny FAILED [100%] =================================== FAILURES =================================== -_____________________________ test_retire_workers ______________________________ +__________________________________ test_nanny __________________________________ -fut = , timeout = 0 +fut = , timeout = 0 async def wait_for(fut: Awaitable[T], timeout: float) -> T: async with asyncio.timeout(timeout): @@ -11720,10 +13067,10 @@ stream = await self.client.connect( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ -self = -host = '127.0.0.1', port = 44543, af = -ssl_options = -max_buffer_size = 31544631296.0, source_ip = None, source_port = None +self = +host = '127.0.0.1', port = 34081, af = +ssl_options = +max_buffer_size = 31544633344.0, source_ip = None, source_port = None timeout = None async def connect( @@ -11819,7 +13166,7 @@ self = exc_type = -exc_val = CancelledError(), exc_tb = +exc_val = CancelledError(), exc_tb = async def __aexit__( self, @@ -11904,7 +13251,7 @@ During handling of the above exception, another exception occurred: -fut = ._..test_func..async_fn at 0xffffb571a6c0> +fut = ._..test_func..async_fn at 0xffff9d2066c0> timeout = 60 async def wait_for(fut: Awaitable[T], timeout: float) -> T: @@ -11976,7 +13323,7 @@ self = exc_type = -exc_val = CancelledError(), exc_tb = +exc_val = CancelledError(), exc_tb = async def __aexit__( self, @@ -12002,88 +13349,72 @@ /usr/lib/python3.13/asyncio/timeouts.py:116: TimeoutError ----------------------------- Captured stderr call ----------------------------- -2026-04-13 07:32:09,277 - distributed.scheduler - INFO - State start -2026-04-13 07:32:09,289 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37951 -2026-04-13 07:32:09,290 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:32979/status -2026-04-13 07:32:09,290 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:09,344 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:42881' -2026-04-13 07:32:09,367 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:45221' -2026-04-13 07:32:10,170 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:40861 -2026-04-13 07:32:10,178 - distributed.worker - INFO - Listening to: tls://127.0.0.1:40861 -2026-04-13 07:32:10,178 - distributed.worker - INFO - Worker name: 1 -2026-04-13 07:32:10,179 - distributed.worker - INFO - dashboard at: 127.0.0.1:43677 -2026-04-13 07:32:10,179 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:37951 -2026-04-13 07:32:10,179 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 
07:32:10,179 - distributed.worker - INFO - Threads: 2 -2026-04-13 07:32:10,179 - distributed.worker - INFO - Memory: 58.76 GiB -2026-04-13 07:32:10,179 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-5byhi3qn -2026-04-13 07:32:10,179 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:32:10,485 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:33079 -2026-04-13 07:32:10,486 - distributed.worker - INFO - Listening to: tls://127.0.0.1:33079 -2026-04-13 07:32:10,486 - distributed.worker - INFO - Worker name: 0 -2026-04-13 07:32:10,486 - distributed.worker - INFO - dashboard at: 127.0.0.1:45547 -2026-04-13 07:32:10,486 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:37951 -2026-04-13 07:32:10,486 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:32:10,486 - distributed.worker - INFO - Threads: 1 -2026-04-13 07:32:10,486 - distributed.worker - INFO - Memory: 58.76 GiB -2026-04-13 07:32:10,486 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-dwy63z9h -2026-04-13 07:32:10,486 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:32:10,778 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:40861 name: 1 -2026-04-13 07:32:10,779 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:40861 -2026-04-13 07:32:10,779 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:59434 -2026-04-13 07:32:10,783 - distributed.worker - INFO - Starting Worker plugin shuffle -2026-04-13 07:32:10,784 - distributed.worker - INFO - Registered to: tls://127.0.0.1:37951 -2026-04-13 07:32:10,784 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:32:10,785 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:37951 -2026-04-13 07:32:11,031 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:33079 name: 0 -2026-04-13 07:32:11,032 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:33079 -2026-04-13 07:32:11,032 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:59436 -2026-04-13 07:32:11,032 - distributed.core - INFO - Connection to tls://127.0.0.1:59436 has been closed. 
-2026-04-13 07:32:11,032 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:33079 name: 0 (stimulus_id='handle-worker-cleanup-1776108731.0326529') -2026-04-13 07:32:11,035 - distributed.worker - INFO - Starting Worker plugin shuffle -2026-04-13 07:32:11,036 - distributed.batched - INFO - Batched Comm Closed Scheduler local=tls://127.0.0.1:59436 remote=tls://127.0.0.1:37951> -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 297, in write - raise StreamClosedError() -tornado.iostream.StreamClosedError: Stream is closed - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/batched.py", line 115, in _background_send - nbytes = yield coro - ^^^^^^^^^^ - File "/usr/lib/python3/dist-packages/tornado/gen.py", line 766, in run - value = future.result() - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 307, in write - convert_stream_closed_error(self, e) - ~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 142, in convert_stream_closed_error - raise CommClosedError(f"in {obj}: {exc}") from exc -distributed.comm.core.CommClosedError: in Scheduler local=tls://127.0.0.1:59436 remote=tls://127.0.0.1:37951>: Stream is closed -2026-04-13 07:32:11,038 - distributed.worker - INFO - Registered to: tls://127.0.0.1:37951 -2026-04-13 07:32:11,038 - distributed.worker - INFO - ------------------------------------------------- -2026-04-13 07:32:11,062 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:37951 -2026-04-13 07:32:11,063 - distributed.core - INFO - Connection to tls://127.0.0.1:37951 has been closed. -2026-04-13 07:32:11,063 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:33079. Reason: worker-handle-scheduler-connection-broken -2026-04-13 07:32:11,087 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:42881'. Reason: worker-handle-scheduler-connection-broken -2026-04-13 07:32:11,088 - distributed.worker - INFO - Removing Worker plugin shuffle -2026-04-13 07:32:11,111 - distributed.nanny - INFO - Worker closed -2026-04-13 07:32:13,751 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42881'. Reason: nanny-close-gracefully -2026-04-13 07:32:13,751 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42881' closed. -2026-04-13 07:32:41,094 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45221'. Reason: nanny-close -2026-04-13 07:32:41,095 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close -2026-04-13 07:32:41,106 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:40861. Reason: nanny-close -2026-04-13 07:32:41,107 - distributed.worker - INFO - Removing Worker plugin shuffle -2026-04-13 07:32:41,108 - distributed.core - INFO - Connection to tls://127.0.0.1:37951 has been closed. -2026-04-13 07:32:41,111 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:59434; closing. 
-2026-04-13 07:32:41,112 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:40861 name: 1 (stimulus_id='handle-worker-cleanup-1776108761.1121411') -2026-04-13 07:32:41,112 - distributed.scheduler - INFO - Lost all workers -2026-04-13 07:32:41,131 - distributed.nanny - INFO - Worker closed -2026-04-13 07:32:41,667 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45221' closed. -2026-04-13 07:32:41,667 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown -2026-04-13 07:32:41,668 - distributed.scheduler - INFO - Scheduler closing all comms -2026-04-13 07:32:41,668 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying +2025-03-12 04:20:34,913 - distributed.http.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: python -m pip install jupyter-server-proxy +2025-03-12 04:20:34,931 - distributed.scheduler - INFO - State start +2025-03-12 04:20:34,951 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40271 +2025-03-12 04:20:34,952 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35781/status +2025-03-12 04:20:34,952 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:20:35,123 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:35433' +2025-03-12 04:20:35,178 - distributed.nanny - INFO - Start Nanny at: 'tls://127.0.0.1:41891' +2025-03-12 04:20:37,071 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:36025 +2025-03-12 04:20:37,072 - distributed.worker - INFO - Listening to: tls://127.0.0.1:36025 +2025-03-12 04:20:37,072 - distributed.worker - INFO - Worker name: 1 +2025-03-12 04:20:37,072 - distributed.worker - INFO - dashboard at: 127.0.0.1:41633 +2025-03-12 04:20:37,072 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:40271 +2025-03-12 04:20:37,072 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:20:37,072 - distributed.worker - INFO - Threads: 2 +2025-03-12 04:20:37,072 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:20:37,072 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-2dcs6j7x +2025-03-12 04:20:37,072 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:20:37,124 - distributed.worker - INFO - Start worker at: tls://127.0.0.1:37403 +2025-03-12 04:20:37,125 - distributed.worker - INFO - Listening to: tls://127.0.0.1:37403 +2025-03-12 04:20:37,125 - distributed.worker - INFO - Worker name: 0 +2025-03-12 04:20:37,125 - distributed.worker - INFO - dashboard at: 127.0.0.1:42875 +2025-03-12 04:20:37,125 - distributed.worker - INFO - Waiting to connect to: tls://127.0.0.1:40271 +2025-03-12 04:20:37,125 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:20:37,125 - distributed.worker - INFO - Threads: 1 +2025-03-12 04:20:37,125 - distributed.worker - INFO - Memory: 58.76 GiB +2025-03-12 04:20:37,125 - distributed.worker - INFO - Local Directory: /tmp/dask-scratch-space/worker-tdhixey9 +2025-03-12 04:20:37,125 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:20:38,082 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:36025 name: 1 +2025-03-12 04:20:38,951 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:36025 +2025-03-12 04:20:38,952 - distributed.core - INFO - Starting established 
connection to tls://127.0.0.1:46302 +2025-03-12 04:20:38,953 - distributed.scheduler - INFO - Register worker addr: tls://127.0.0.1:37403 name: 0 +2025-03-12 04:20:38,954 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:20:38,955 - distributed.worker - INFO - Registered to: tls://127.0.0.1:40271 +2025-03-12 04:20:38,955 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:20:38,956 - distributed.worker - INFO - Starting Worker plugin shuffle +2025-03-12 04:20:38,957 - distributed.worker - INFO - Registered to: tls://127.0.0.1:40271 +2025-03-12 04:20:38,957 - distributed.worker - INFO - ------------------------------------------------- +2025-03-12 04:20:38,962 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40271 +2025-03-12 04:20:38,953 - distributed.scheduler - INFO - Starting worker compute stream, tls://127.0.0.1:37403 +2025-03-12 04:20:38,966 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:46306 +2025-03-12 04:20:38,969 - distributed.core - INFO - Connection to tls://127.0.0.1:46302 has been closed. +2025-03-12 04:20:38,969 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:36025 name: 1 (stimulus_id='handle-worker-cleanup-1741702838.9695618') +2025-03-12 04:20:38,970 - distributed.core - INFO - Connection to tls://127.0.0.1:40271 has been closed. +2025-03-12 04:20:38,970 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:36025. Reason: worker-handle-scheduler-connection-broken +2025-03-12 04:20:38,978 - distributed.core - INFO - Starting established connection to tls://127.0.0.1:40271 +2025-03-12 04:20:39,015 - distributed.nanny - INFO - Closing Nanny gracefully at 'tls://127.0.0.1:41891'. Reason: worker-handle-scheduler-connection-broken +2025-03-12 04:20:39,023 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:20:39,042 - distributed.nanny - INFO - Worker closed +2025-03-12 04:20:41,078 - distributed.nanny - ERROR - Worker process died unexpectedly +2025-03-12 04:20:42,086 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41891'. Reason: nanny-close-gracefully +2025-03-12 04:20:42,086 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41891' closed. +2025-03-12 04:21:09,027 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35433'. Reason: nanny-close +2025-03-12 04:21:09,027 - distributed.nanny - INFO - Nanny asking worker to close. Reason: nanny-close +2025-03-12 04:21:09,038 - distributed.worker - INFO - Stopping worker at tls://127.0.0.1:37403. Reason: nanny-close +2025-03-12 04:21:09,038 - distributed.worker - INFO - Removing Worker plugin shuffle +2025-03-12 04:21:09,039 - distributed.core - INFO - Connection to tls://127.0.0.1:40271 has been closed. +2025-03-12 04:21:09,041 - distributed.core - INFO - Received 'close-stream' from tls://127.0.0.1:46306; closing. +2025-03-12 04:21:09,041 - distributed.scheduler - INFO - Remove worker addr: tls://127.0.0.1:37403 name: 0 (stimulus_id='handle-worker-cleanup-1741702869.0415552') +2025-03-12 04:21:09,041 - distributed.scheduler - INFO - Lost all workers +2025-03-12 04:21:09,058 - distributed.nanny - INFO - Worker closed +2025-03-12 04:21:09,830 - distributed.nanny - WARNING - Worker process still alive after 0.8 seconds, killing +2025-03-12 04:21:09,862 - distributed.nanny - INFO - Worker process 828007 was killed by signal 9 +2025-03-12 04:21:09,862 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35433' closed. 
+2025-03-12 04:21:09,863 - distributed.scheduler - INFO - Closing scheduler. Reason: unknown +2025-03-12 04:21:09,863 - distributed.scheduler - INFO - Scheduler closing all comms +2025-03-12 04:21:09,863 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Cluster creation timeout; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory s, ws = await start_cluster( @@ -12094,17 +13425,17 @@ File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 806, in start_cluster raise TimeoutError("Cluster creation timeout") TimeoutError: Cluster creation timeout -2026-04-13 07:32:42,680 - distributed.scheduler - INFO - State start -2026-04-13 07:32:42,683 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41995 -2026-04-13 07:32:42,684 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:32987/status -2026-04-13 07:32:42,684 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:42,700 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41189'. Reason: failure-to-start- -2026-04-13 07:32:42,700 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41189' closed. -2026-04-13 07:32:42,700 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46137'. Reason: failure-to-start- -2026-04-13 07:32:42,700 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46137' closed. -2026-04-13 07:32:42,701 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41995': TLS handshake failed with remote 'tls://127.0.0.1:43250': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:42,701 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41995': TLS handshake failed with remote 'tls://127.0.0.1:43254': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:42,701 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:10,871 - distributed.scheduler - INFO - State start +2025-03-12 04:21:10,883 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41501 +2025-03-12 04:21:10,884 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46733/status +2025-03-12 04:21:10,884 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:10,916 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37463'. Reason: failure-to-start- +2025-03-12 04:21:10,917 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37463' closed. +2025-03-12 04:21:10,917 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38039'. Reason: failure-to-start- +2025-03-12 04:21:10,917 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38039' closed. 
+2025-03-12 04:21:10,917 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41501': TLS handshake failed with remote 'tls://127.0.0.1:53138': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:10,926 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41501': TLS handshake failed with remote 'tls://127.0.0.1:53154': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:10,926 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12173,17 +13504,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:43,712 - distributed.scheduler - INFO - State start -2026-04-13 07:32:43,720 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33293 -2026-04-13 07:32:43,720 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37103/status -2026-04-13 07:32:43,720 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:43,742 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44433'. Reason: failure-to-start- -2026-04-13 07:32:43,743 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44433' closed. -2026-04-13 07:32:43,743 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33603'. Reason: failure-to-start- -2026-04-13 07:32:43,743 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33603' closed. -2026-04-13 07:32:43,743 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33293': TLS handshake failed with remote 'tls://127.0.0.1:37560': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:43,743 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33293': TLS handshake failed with remote 'tls://127.0.0.1:37564': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:43,744 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:11,935 - distributed.scheduler - INFO - State start +2025-03-12 04:21:11,947 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44295 +2025-03-12 04:21:11,947 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46813/status +2025-03-12 04:21:11,947 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:11,977 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33549'. Reason: failure-to-start- +2025-03-12 04:21:11,977 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33549' closed. +2025-03-12 04:21:11,977 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45997'. Reason: failure-to-start- +2025-03-12 04:21:11,977 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45997' closed. 
+2025-03-12 04:21:11,990 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44295': TLS handshake failed with remote 'tls://127.0.0.1:45490': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:11,990 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44295': TLS handshake failed with remote 'tls://127.0.0.1:45494': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:11,990 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12252,17 +13583,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:44,752 - distributed.scheduler - INFO - State start -2026-04-13 07:32:44,755 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37273 -2026-04-13 07:32:44,756 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46205/status -2026-04-13 07:32:44,756 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:44,764 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36339'. Reason: failure-to-start- -2026-04-13 07:32:44,765 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36339' closed. -2026-04-13 07:32:44,765 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36957'. Reason: failure-to-start- -2026-04-13 07:32:44,765 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36957' closed. -2026-04-13 07:32:44,765 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37273': TLS handshake failed with remote 'tls://127.0.0.1:47544': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:44,765 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37273': TLS handshake failed with remote 'tls://127.0.0.1:47552': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:44,765 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:12,995 - distributed.scheduler - INFO - State start +2025-03-12 04:21:13,007 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43985 +2025-03-12 04:21:13,007 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37271/status +2025-03-12 04:21:13,007 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:13,035 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42847'. Reason: failure-to-start- +2025-03-12 04:21:13,035 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42847' closed. +2025-03-12 04:21:13,035 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46581'. Reason: failure-to-start- +2025-03-12 04:21:13,036 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46581' closed. 
+2025-03-12 04:21:13,036 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43985': TLS handshake failed with remote 'tls://127.0.0.1:33526': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:13,036 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43985': TLS handshake failed with remote 'tls://127.0.0.1:33530': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:13,036 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12331,17 +13662,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:45,772 - distributed.scheduler - INFO - State start -2026-04-13 07:32:45,779 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46029 -2026-04-13 07:32:45,780 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38487/status -2026-04-13 07:32:45,780 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:45,797 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41307'. Reason: failure-to-start- -2026-04-13 07:32:45,798 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41307' closed. -2026-04-13 07:32:45,798 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45499'. Reason: failure-to-start- -2026-04-13 07:32:45,798 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45499' closed. -2026-04-13 07:32:45,803 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46029': TLS handshake failed with remote 'tls://127.0.0.1:38536': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:45,803 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46029': TLS handshake failed with remote 'tls://127.0.0.1:38540': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:45,803 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:14,051 - distributed.scheduler - INFO - State start +2025-03-12 04:21:14,066 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44047 +2025-03-12 04:21:14,067 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33227/status +2025-03-12 04:21:14,067 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:14,100 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39699'. Reason: failure-to-start- +2025-03-12 04:21:14,101 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39699' closed. +2025-03-12 04:21:14,101 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43927'. Reason: failure-to-start- +2025-03-12 04:21:14,101 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43927' closed. 
+2025-03-12 04:21:14,101 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44047': TLS handshake failed with remote 'tls://127.0.0.1:60472': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:14,101 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44047': TLS handshake failed with remote 'tls://127.0.0.1:60486': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:14,110 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12410,17 +13741,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:46,808 - distributed.scheduler - INFO - State start -2026-04-13 07:32:46,816 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37171 -2026-04-13 07:32:46,816 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46057/status -2026-04-13 07:32:46,816 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:46,833 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42963'. Reason: failure-to-start- -2026-04-13 07:32:46,833 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42963' closed. -2026-04-13 07:32:46,833 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37859'. Reason: failure-to-start- -2026-04-13 07:32:46,833 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37859' closed. -2026-04-13 07:32:46,834 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37171': TLS handshake failed with remote 'tls://127.0.0.1:54580': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:46,834 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37171': TLS handshake failed with remote 'tls://127.0.0.1:54584': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:46,834 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:15,115 - distributed.scheduler - INFO - State start +2025-03-12 04:21:15,126 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37123 +2025-03-12 04:21:15,127 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41741/status +2025-03-12 04:21:15,127 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:15,159 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34097'. Reason: failure-to-start- +2025-03-12 04:21:15,159 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34097' closed. +2025-03-12 04:21:15,160 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:32945'. Reason: failure-to-start- +2025-03-12 04:21:15,160 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:32945' closed. 
+2025-03-12 04:21:15,160 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37123': TLS handshake failed with remote 'tls://127.0.0.1:40376': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:15,160 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37123': TLS handshake failed with remote 'tls://127.0.0.1:40384': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:15,160 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12489,17 +13820,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:47,844 - distributed.scheduler - INFO - State start -2026-04-13 07:32:47,847 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39749 -2026-04-13 07:32:47,848 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41255/status -2026-04-13 07:32:47,848 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:47,865 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41203'. Reason: failure-to-start- -2026-04-13 07:32:47,865 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41203' closed. -2026-04-13 07:32:47,865 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35173'. Reason: failure-to-start- -2026-04-13 07:32:47,866 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35173' closed. -2026-04-13 07:32:47,866 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39749': TLS handshake failed with remote 'tls://127.0.0.1:58052': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:47,866 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39749': TLS handshake failed with remote 'tls://127.0.0.1:58060': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:47,874 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:16,175 - distributed.scheduler - INFO - State start +2025-03-12 04:21:16,191 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44315 +2025-03-12 04:21:16,191 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45171/status +2025-03-12 04:21:16,191 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:16,228 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45963'. Reason: failure-to-start- +2025-03-12 04:21:16,228 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45963' closed. +2025-03-12 04:21:16,228 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46661'. Reason: failure-to-start- +2025-03-12 04:21:16,229 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46661' closed. 
+2025-03-12 04:21:16,229 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44315': TLS handshake failed with remote 'tls://127.0.0.1:36512': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:16,229 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44315': TLS handshake failed with remote 'tls://127.0.0.1:36528': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:16,229 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12568,17 +13899,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:48,880 - distributed.scheduler - INFO - State start -2026-04-13 07:32:48,887 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35617 -2026-04-13 07:32:48,888 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:42383/status -2026-04-13 07:32:48,888 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:48,905 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46607'. Reason: failure-to-start- -2026-04-13 07:32:48,905 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46607' closed. -2026-04-13 07:32:48,905 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33421'. Reason: failure-to-start- -2026-04-13 07:32:48,905 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33421' closed. -2026-04-13 07:32:48,905 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35617': TLS handshake failed with remote 'tls://127.0.0.1:46888': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:48,905 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35617': TLS handshake failed with remote 'tls://127.0.0.1:46890': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:48,906 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:17,243 - distributed.scheduler - INFO - State start +2025-03-12 04:21:17,250 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33097 +2025-03-12 04:21:17,251 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40459/status +2025-03-12 04:21:17,251 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:17,271 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43459'. Reason: failure-to-start- +2025-03-12 04:21:17,271 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43459' closed. +2025-03-12 04:21:17,271 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41635'. Reason: failure-to-start- +2025-03-12 04:21:17,271 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41635' closed. 
+2025-03-12 04:21:17,272 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33097': TLS handshake failed with remote 'tls://127.0.0.1:54688': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:17,272 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33097': TLS handshake failed with remote 'tls://127.0.0.1:54692': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:17,272 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12647,17 +13978,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:49,912 - distributed.scheduler - INFO - State start -2026-04-13 07:32:49,915 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44405 -2026-04-13 07:32:49,916 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36045/status -2026-04-13 07:32:49,916 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:49,933 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40409'. Reason: failure-to-start- -2026-04-13 07:32:49,933 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40409' closed. -2026-04-13 07:32:49,933 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35931'. Reason: failure-to-start- -2026-04-13 07:32:49,933 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35931' closed. -2026-04-13 07:32:49,934 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44405': TLS handshake failed with remote 'tls://127.0.0.1:60442': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:49,934 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44405': TLS handshake failed with remote 'tls://127.0.0.1:60456': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:49,934 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:18,279 - distributed.scheduler - INFO - State start +2025-03-12 04:21:18,294 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38827 +2025-03-12 04:21:18,295 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38499/status +2025-03-12 04:21:18,295 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:18,336 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37103'. Reason: failure-to-start- +2025-03-12 04:21:18,336 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37103' closed. +2025-03-12 04:21:18,336 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41395'. Reason: failure-to-start- +2025-03-12 04:21:18,336 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41395' closed. 
+2025-03-12 04:21:18,337 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38827': TLS handshake failed with remote 'tls://127.0.0.1:52614': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:18,337 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38827': TLS handshake failed with remote 'tls://127.0.0.1:52624': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:18,337 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12726,17 +14057,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:50,948 - distributed.scheduler - INFO - State start -2026-04-13 07:32:50,951 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39413 -2026-04-13 07:32:50,952 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45071/status -2026-04-13 07:32:50,952 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:50,968 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37327'. Reason: failure-to-start- -2026-04-13 07:32:50,969 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37327' closed. -2026-04-13 07:32:50,969 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39529'. Reason: failure-to-start- -2026-04-13 07:32:50,969 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39529' closed. -2026-04-13 07:32:50,969 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39413': TLS handshake failed with remote 'tls://127.0.0.1:39824': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:50,969 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39413': TLS handshake failed with remote 'tls://127.0.0.1:39840': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:50,969 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:19,359 - distributed.scheduler - INFO - State start +2025-03-12 04:21:19,370 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45535 +2025-03-12 04:21:19,371 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38627/status +2025-03-12 04:21:19,371 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:19,400 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34937'. Reason: failure-to-start- +2025-03-12 04:21:19,401 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34937' closed. +2025-03-12 04:21:19,401 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38073'. Reason: failure-to-start- +2025-03-12 04:21:19,401 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38073' closed. 
+2025-03-12 04:21:19,401 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45535': TLS handshake failed with remote 'tls://127.0.0.1:42976': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:19,414 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45535': TLS handshake failed with remote 'tls://127.0.0.1:42984': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:19,414 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12805,17 +14136,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:51,984 - distributed.scheduler - INFO - State start -2026-04-13 07:32:51,992 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41461 -2026-04-13 07:32:51,992 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:43205/status -2026-04-13 07:32:51,992 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:52,010 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34297'. Reason: failure-to-start- -2026-04-13 07:32:52,010 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34297' closed. -2026-04-13 07:32:52,010 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46197'. Reason: failure-to-start- -2026-04-13 07:32:52,010 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46197' closed. -2026-04-13 07:32:52,011 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41461': TLS handshake failed with remote 'tls://127.0.0.1:33244': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:52,011 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41461': TLS handshake failed with remote 'tls://127.0.0.1:33248': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:52,011 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:20,419 - distributed.scheduler - INFO - State start +2025-03-12 04:21:20,430 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:42931 +2025-03-12 04:21:20,431 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46119/status +2025-03-12 04:21:20,431 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:20,477 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39585'. Reason: failure-to-start- +2025-03-12 04:21:20,477 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39585' closed. +2025-03-12 04:21:20,477 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37405'. Reason: failure-to-start- +2025-03-12 04:21:20,490 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37405' closed. 
+2025-03-12 04:21:20,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42931': TLS handshake failed with remote 'tls://127.0.0.1:52114': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:20,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:42931': TLS handshake failed with remote 'tls://127.0.0.1:52128': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:20,491 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12884,17 +14215,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:53,016 - distributed.scheduler - INFO - State start -2026-04-13 07:32:53,024 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38473 -2026-04-13 07:32:53,024 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34883/status -2026-04-13 07:32:53,024 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:53,045 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44203'. Reason: failure-to-start- -2026-04-13 07:32:53,045 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44203' closed. -2026-04-13 07:32:53,045 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46719'. Reason: failure-to-start- -2026-04-13 07:32:53,045 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46719' closed. -2026-04-13 07:32:53,046 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38473': TLS handshake failed with remote 'tls://127.0.0.1:34222': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:53,046 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38473': TLS handshake failed with remote 'tls://127.0.0.1:34234': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:53,046 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:21,495 - distributed.scheduler - INFO - State start +2025-03-12 04:21:21,518 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37113 +2025-03-12 04:21:21,519 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38595/status +2025-03-12 04:21:21,519 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:21,572 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41867'. Reason: failure-to-start- +2025-03-12 04:21:21,572 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41867' closed. +2025-03-12 04:21:21,573 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46581'. Reason: failure-to-start- +2025-03-12 04:21:21,573 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46581' closed. 
+2025-03-12 04:21:21,602 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37113': TLS handshake failed with remote 'tls://127.0.0.1:56086': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:21,603 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37113': TLS handshake failed with remote 'tls://127.0.0.1:56100': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:21,603 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -12963,17 +14294,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:54,052 - distributed.scheduler - INFO - State start -2026-04-13 07:32:54,055 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33751 -2026-04-13 07:32:54,055 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37355/status -2026-04-13 07:32:54,056 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:54,073 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41247'. Reason: failure-to-start- -2026-04-13 07:32:54,073 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41247' closed. -2026-04-13 07:32:54,073 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35215'. Reason: failure-to-start- -2026-04-13 07:32:54,073 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35215' closed. -2026-04-13 07:32:54,073 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33751': TLS handshake failed with remote 'tls://127.0.0.1:57520': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:54,074 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33751': TLS handshake failed with remote 'tls://127.0.0.1:57528': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:54,074 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:22,607 - distributed.scheduler - INFO - State start +2025-03-12 04:21:22,623 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37207 +2025-03-12 04:21:22,623 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33223/status +2025-03-12 04:21:22,624 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:22,653 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44037'. Reason: failure-to-start- +2025-03-12 04:21:22,653 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44037' closed. +2025-03-12 04:21:22,653 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43379'. Reason: failure-to-start- +2025-03-12 04:21:22,662 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43379' closed. 
+2025-03-12 04:21:22,662 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37207': TLS handshake failed with remote 'tls://127.0.0.1:59772': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:22,662 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37207': TLS handshake failed with remote 'tls://127.0.0.1:59780': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:22,663 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13042,17 +14373,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:55,088 - distributed.scheduler - INFO - State start -2026-04-13 07:32:55,091 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46357 -2026-04-13 07:32:55,091 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36691/status -2026-04-13 07:32:55,092 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:55,107 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34939'. Reason: failure-to-start- -2026-04-13 07:32:55,108 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34939' closed. -2026-04-13 07:32:55,108 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39257'. Reason: failure-to-start- -2026-04-13 07:32:55,108 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39257' closed. -2026-04-13 07:32:55,108 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46357': TLS handshake failed with remote 'tls://127.0.0.1:55064': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:55,108 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46357': TLS handshake failed with remote 'tls://127.0.0.1:55080': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:55,108 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:23,667 - distributed.scheduler - INFO - State start +2025-03-12 04:21:23,682 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34011 +2025-03-12 04:21:23,683 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44715/status +2025-03-12 04:21:23,683 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:23,712 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38813'. Reason: failure-to-start- +2025-03-12 04:21:23,712 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38813' closed. +2025-03-12 04:21:23,712 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38435'. Reason: failure-to-start- +2025-03-12 04:21:23,712 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38435' closed. 
+2025-03-12 04:21:23,713 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34011': TLS handshake failed with remote 'tls://127.0.0.1:37240': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:23,713 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34011': TLS handshake failed with remote 'tls://127.0.0.1:37254': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:23,713 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13121,17 +14452,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:56,124 - distributed.scheduler - INFO - State start -2026-04-13 07:32:56,127 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:37197 -2026-04-13 07:32:56,128 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40917/status -2026-04-13 07:32:56,128 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:56,145 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42131'. Reason: failure-to-start- -2026-04-13 07:32:56,145 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42131' closed. -2026-04-13 07:32:56,145 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36959'. Reason: failure-to-start- -2026-04-13 07:32:56,145 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36959' closed. -2026-04-13 07:32:56,146 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37197': TLS handshake failed with remote 'tls://127.0.0.1:38150': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:56,146 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:37197': TLS handshake failed with remote 'tls://127.0.0.1:38152': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:56,146 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:24,727 - distributed.scheduler - INFO - State start +2025-03-12 04:21:24,739 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:38289 +2025-03-12 04:21:24,740 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:32971/status +2025-03-12 04:21:24,740 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:24,773 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41447'. Reason: failure-to-start- +2025-03-12 04:21:24,773 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41447' closed. +2025-03-12 04:21:24,773 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41713'. Reason: failure-to-start- +2025-03-12 04:21:24,773 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41713' closed. 
+2025-03-12 04:21:24,786 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38289': TLS handshake failed with remote 'tls://127.0.0.1:52802': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:24,786 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:38289': TLS handshake failed with remote 'tls://127.0.0.1:52816': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:24,786 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13200,17 +14531,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:57,160 - distributed.scheduler - INFO - State start -2026-04-13 07:32:57,163 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43097 -2026-04-13 07:32:57,164 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40489/status -2026-04-13 07:32:57,164 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:57,179 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39609'. Reason: failure-to-start- -2026-04-13 07:32:57,180 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39609' closed. -2026-04-13 07:32:57,180 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37617'. Reason: failure-to-start- -2026-04-13 07:32:57,180 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37617' closed. -2026-04-13 07:32:57,180 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43097': TLS handshake failed with remote 'tls://127.0.0.1:49016': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:57,180 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43097': TLS handshake failed with remote 'tls://127.0.0.1:49018': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:57,181 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:25,791 - distributed.scheduler - INFO - State start +2025-03-12 04:21:25,806 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39819 +2025-03-12 04:21:25,807 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39719/status +2025-03-12 04:21:25,807 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:25,836 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38201'. Reason: failure-to-start- +2025-03-12 04:21:25,836 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38201' closed. +2025-03-12 04:21:25,836 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35363'. Reason: failure-to-start- +2025-03-12 04:21:25,836 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35363' closed. 
+2025-03-12 04:21:25,837 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39819': TLS handshake failed with remote 'tls://127.0.0.1:43618': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:25,837 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39819': TLS handshake failed with remote 'tls://127.0.0.1:43622': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:25,837 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13279,17 +14610,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:58,196 - distributed.scheduler - INFO - State start -2026-04-13 07:32:58,199 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34617 -2026-04-13 07:32:58,200 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37627/status -2026-04-13 07:32:58,200 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:58,217 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44345'. Reason: failure-to-start- -2026-04-13 07:32:58,217 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44345' closed. -2026-04-13 07:32:58,217 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42173'. Reason: failure-to-start- -2026-04-13 07:32:58,217 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42173' closed. -2026-04-13 07:32:58,218 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34617': TLS handshake failed with remote 'tls://127.0.0.1:59184': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:58,218 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34617': TLS handshake failed with remote 'tls://127.0.0.1:59194': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:58,218 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:26,855 - distributed.scheduler - INFO - State start +2025-03-12 04:21:26,860 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34361 +2025-03-12 04:21:26,860 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40061/status +2025-03-12 04:21:26,860 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:26,880 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41311'. Reason: failure-to-start- +2025-03-12 04:21:26,880 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41311' closed. +2025-03-12 04:21:26,880 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39909'. Reason: failure-to-start- +2025-03-12 04:21:26,881 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39909' closed. 
+2025-03-12 04:21:26,882 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34361': TLS handshake failed with remote 'tls://127.0.0.1:44372': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:26,882 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34361': TLS handshake failed with remote 'tls://127.0.0.1:44386': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:26,882 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13358,17 +14689,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:32:59,224 - distributed.scheduler - INFO - State start -2026-04-13 07:32:59,227 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:39503 -2026-04-13 07:32:59,228 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:34401/status -2026-04-13 07:32:59,229 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:32:59,245 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:41103'. Reason: failure-to-start- -2026-04-13 07:32:59,245 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:41103' closed. -2026-04-13 07:32:59,246 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34533'. Reason: failure-to-start- -2026-04-13 07:32:59,246 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34533' closed. -2026-04-13 07:32:59,254 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39503': TLS handshake failed with remote 'tls://127.0.0.1:38624': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:59,255 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:39503': TLS handshake failed with remote 'tls://127.0.0.1:38630': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:32:59,255 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:27,888 - distributed.scheduler - INFO - State start +2025-03-12 04:21:27,895 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:35685 +2025-03-12 04:21:27,896 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:38759/status +2025-03-12 04:21:27,896 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:27,912 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34989'. Reason: failure-to-start- +2025-03-12 04:21:27,912 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34989' closed. +2025-03-12 04:21:27,912 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42779'. Reason: failure-to-start- +2025-03-12 04:21:27,912 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42779' closed. 
+2025-03-12 04:21:27,912 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35685': TLS handshake failed with remote 'tls://127.0.0.1:58554': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:27,912 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:35685': TLS handshake failed with remote 'tls://127.0.0.1:58560': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:27,912 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13437,17 +14768,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:00,261 - distributed.scheduler - INFO - State start -2026-04-13 07:33:00,265 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43243 -2026-04-13 07:33:00,265 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36181/status -2026-04-13 07:33:00,265 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:00,291 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43711'. Reason: failure-to-start- -2026-04-13 07:33:00,291 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43711' closed. -2026-04-13 07:33:00,291 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39557'. Reason: failure-to-start- -2026-04-13 07:33:00,291 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39557' closed. -2026-04-13 07:33:00,293 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43243': TLS handshake failed with remote 'tls://127.0.0.1:57962': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:00,293 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43243': TLS handshake failed with remote 'tls://127.0.0.1:57976': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:00,293 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:28,927 - distributed.scheduler - INFO - State start +2025-03-12 04:21:28,940 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44477 +2025-03-12 04:21:28,940 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:39945/status +2025-03-12 04:21:28,940 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:28,966 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:38325'. Reason: failure-to-start- +2025-03-12 04:21:28,966 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:38325' closed. +2025-03-12 04:21:28,966 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36783'. Reason: failure-to-start- +2025-03-12 04:21:28,966 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36783' closed. 
+2025-03-12 04:21:28,967 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44477': TLS handshake failed with remote 'tls://127.0.0.1:40682': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:28,967 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44477': TLS handshake failed with remote 'tls://127.0.0.1:40688': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:28,967 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13516,17 +14847,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:01,300 - distributed.scheduler - INFO - State start -2026-04-13 07:33:01,308 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43721 -2026-04-13 07:33:01,308 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45973/status -2026-04-13 07:33:01,308 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:01,329 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34435'. Reason: failure-to-start- -2026-04-13 07:33:01,329 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34435' closed. -2026-04-13 07:33:01,329 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43399'. Reason: failure-to-start- -2026-04-13 07:33:01,329 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43399' closed. -2026-04-13 07:33:01,331 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43721': TLS handshake failed with remote 'tls://127.0.0.1:54942': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:01,331 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43721': TLS handshake failed with remote 'tls://127.0.0.1:54950': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:01,331 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:29,971 - distributed.scheduler - INFO - State start +2025-03-12 04:21:29,978 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33437 +2025-03-12 04:21:29,979 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37565/status +2025-03-12 04:21:29,979 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:29,999 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:37475'. Reason: failure-to-start- +2025-03-12 04:21:29,999 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:37475' closed. +2025-03-12 04:21:29,999 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44857'. Reason: failure-to-start- +2025-03-12 04:21:29,999 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44857' closed. 
+2025-03-12 04:21:30,000 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33437': TLS handshake failed with remote 'tls://127.0.0.1:34104': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:30,000 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33437': TLS handshake failed with remote 'tls://127.0.0.1:34114': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:30,000 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13595,17 +14926,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:02,336 - distributed.scheduler - INFO - State start -2026-04-13 07:33:02,343 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:40639 -2026-04-13 07:33:02,344 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37271/status -2026-04-13 07:33:02,345 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:02,362 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40809'. Reason: failure-to-start- -2026-04-13 07:33:02,366 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40809' closed. -2026-04-13 07:33:02,366 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36731'. Reason: failure-to-start- -2026-04-13 07:33:02,367 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36731' closed. -2026-04-13 07:33:02,367 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40639': TLS handshake failed with remote 'tls://127.0.0.1:60274': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:02,367 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:40639': TLS handshake failed with remote 'tls://127.0.0.1:60278': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:02,368 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:31,012 - distributed.scheduler - INFO - State start +2025-03-12 04:21:31,025 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34509 +2025-03-12 04:21:31,034 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:44105/status +2025-03-12 04:21:31,034 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:31,068 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45739'. Reason: failure-to-start- +2025-03-12 04:21:31,068 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45739' closed. +2025-03-12 04:21:31,068 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43429'. Reason: failure-to-start- +2025-03-12 04:21:31,068 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43429' closed. 
+2025-03-12 04:21:31,069 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34509': TLS handshake failed with remote 'tls://127.0.0.1:49522': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:31,069 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34509': TLS handshake failed with remote 'tls://127.0.0.1:49534': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:31,078 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13674,17 +15005,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:03,376 - distributed.scheduler - INFO - State start -2026-04-13 07:33:03,379 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34149 -2026-04-13 07:33:03,379 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:33557/status -2026-04-13 07:33:03,380 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:03,396 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36533'. Reason: failure-to-start- -2026-04-13 07:33:03,397 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36533' closed. -2026-04-13 07:33:03,397 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44071'. Reason: failure-to-start- -2026-04-13 07:33:03,397 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44071' closed. -2026-04-13 07:33:03,397 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34149': TLS handshake failed with remote 'tls://127.0.0.1:46396': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:03,397 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34149': TLS handshake failed with remote 'tls://127.0.0.1:46402': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:03,398 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:32,083 - distributed.scheduler - INFO - State start +2025-03-12 04:21:32,095 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:45209 +2025-03-12 04:21:32,095 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:41991/status +2025-03-12 04:21:32,095 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:32,128 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:34611'. Reason: failure-to-start- +2025-03-12 04:21:32,128 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:34611' closed. +2025-03-12 04:21:32,128 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:33269'. Reason: failure-to-start- +2025-03-12 04:21:32,128 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:33269' closed. 
+2025-03-12 04:21:32,139 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45209': TLS handshake failed with remote 'tls://127.0.0.1:43460': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:32,139 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:45209': TLS handshake failed with remote 'tls://127.0.0.1:43470': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:32,139 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13753,17 +15084,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:04,412 - distributed.scheduler - INFO - State start -2026-04-13 07:33:04,443 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:46299 -2026-04-13 07:33:04,443 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46053/status -2026-04-13 07:33:04,444 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:04,461 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45783'. Reason: failure-to-start- -2026-04-13 07:33:04,462 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45783' closed. -2026-04-13 07:33:04,462 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:40413'. Reason: failure-to-start- -2026-04-13 07:33:04,462 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:40413' closed. -2026-04-13 07:33:04,463 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46299': TLS handshake failed with remote 'tls://127.0.0.1:37742': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:04,464 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:46299': TLS handshake failed with remote 'tls://127.0.0.1:37754': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:04,464 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:33,152 - distributed.scheduler - INFO - State start +2025-03-12 04:21:33,164 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:41429 +2025-03-12 04:21:33,165 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:46865/status +2025-03-12 04:21:33,165 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:33,207 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:35281'. Reason: failure-to-start- +2025-03-12 04:21:33,207 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:35281' closed. +2025-03-12 04:21:33,207 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:39323'. Reason: failure-to-start- +2025-03-12 04:21:33,207 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:39323' closed. 
+2025-03-12 04:21:33,209 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41429': TLS handshake failed with remote 'tls://127.0.0.1:36916': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:33,209 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:41429': TLS handshake failed with remote 'tls://127.0.0.1:36922': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:33,209 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -13832,254 +15163,17 @@ f"{type(self).__name__} start timed out after {timeout}s." ) from exc TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:05,468 - distributed.scheduler - INFO - State start -2026-04-13 07:33:05,472 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:43573 -2026-04-13 07:33:05,472 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:45435/status -2026-04-13 07:33:05,472 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:05,489 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45183'. Reason: failure-to-start- -2026-04-13 07:33:05,489 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45183' closed. -2026-04-13 07:33:05,489 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:36431'. Reason: failure-to-start- -2026-04-13 07:33:05,489 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:36431' closed. -2026-04-13 07:33:05,489 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43573': TLS handshake failed with remote 'tls://127.0.0.1:33202': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:05,490 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:43573': TLS handshake failed with remote 'tls://127.0.0.1:33208': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:05,490 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... 
- ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -asyncio.exceptions.CancelledError - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:06,504 - distributed.scheduler - INFO - State start -2026-04-13 07:33:06,509 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:33915 -2026-04-13 07:33:06,509 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:35521/status -2026-04-13 07:33:06,509 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:06,526 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42021'. Reason: failure-to-start- -2026-04-13 07:33:06,534 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42021' closed. -2026-04-13 07:33:06,534 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44107'. Reason: failure-to-start- -2026-04-13 07:33:06,535 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44107' closed. 
-2026-04-13 07:33:06,536 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33915': TLS handshake failed with remote 'tls://127.0.0.1:57952': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:06,536 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:33915': TLS handshake failed with remote 'tls://127.0.0.1:57968': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:06,537 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -asyncio.exceptions.CancelledError - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... 
- ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:07,544 - distributed.scheduler - INFO - State start -2026-04-13 07:33:07,547 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44887 -2026-04-13 07:33:07,548 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:37951/status -2026-04-13 07:33:07,548 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:07,565 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:45735'. Reason: failure-to-start- -2026-04-13 07:33:07,565 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:45735' closed. -2026-04-13 07:33:07,565 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46059'. Reason: failure-to-start- -2026-04-13 07:33:07,565 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46059' closed. -2026-04-13 07:33:07,565 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44887': TLS handshake failed with remote 'tls://127.0.0.1:54186': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:07,566 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44887': TLS handshake failed with remote 'tls://127.0.0.1:54196': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:07,566 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/nanny.py", line 358, in start_unsafe - comm = await self.rpc.connect(saddr) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1485, in connect - return await self._connect(addr=addr, timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 1429, in _connect - comm = await connect( - ^^^^^^^^^^^^^^ - ...<4 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/core.py", line 342, in connect - comm = await wait_for( - ^^^^^^^^^^^^^^^ - ...<2 lines>... 
- ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for - return await fut - ^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/comm/tcp.py", line 546, in connect - stream = await self.client.connect( - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - ) - ^ - File "/usr/lib/python3/dist-packages/tornado/tcpclient.py", line 279, in connect - af, addr, stream = await connector.start(connect_timeout=timeout) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -asyncio.exceptions.CancelledError - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 528, in start - await wait_for(self.start_unsafe(), timeout=timeout) - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1913, in wait_for - async with asyncio.timeout(timeout): - ~~~~~~~~~~~~~~~^^^^^^^^^ - File "/usr/lib/python3.13/asyncio/timeouts.py", line 116, in __aexit__ - raise TimeoutError from exc_val -TimeoutError - -The above exception was the direct cause of the following exception: - -Traceback (most recent call last): - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 974, in _cluster_factory - s, ws = await start_cluster( - ^^^^^^^^^^^^^^^^^^^^ - ...<9 lines>... - ) - ^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils_test.py", line 791, in start_cluster - await asyncio.gather(*workers) - File "/usr/lib/python3.13/asyncio/tasks.py", line 737, in _wrap_awaitable - return await awaitable - ^^^^^^^^^^^^^^^ - File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/core.py", line 531, in start - raise asyncio.TimeoutError( - f"{type(self).__name__} start timed out after {timeout}s." - ) from exc -TimeoutError: Nanny start timed out after 0s. -2026-04-13 07:33:08,580 - distributed.scheduler - INFO - State start -2026-04-13 07:33:08,585 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:44543 -2026-04-13 07:33:08,585 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:40313/status -2026-04-13 07:33:08,586 - distributed.scheduler - INFO - Registering Worker plugin shuffle -2026-04-13 07:33:08,612 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:42651'. Reason: failure-to-start- -2026-04-13 07:33:08,612 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:42651' closed. -2026-04-13 07:33:08,612 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:46487'. Reason: failure-to-start- -2026-04-13 07:33:08,612 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:46487' closed. 
-2026-04-13 07:33:08,613 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44543': TLS handshake failed with remote 'tls://127.0.0.1:35050': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:08,613 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:44543': TLS handshake failed with remote 'tls://127.0.0.1:35058': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) -2026-04-13 07:33:08,613 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying +2025-03-12 04:21:34,223 - distributed.scheduler - INFO - State start +2025-03-12 04:21:34,235 - distributed.scheduler - INFO - Scheduler at: tls://127.0.0.1:34081 +2025-03-12 04:21:34,235 - distributed.scheduler - INFO - dashboard at: http://127.0.0.1:36285/status +2025-03-12 04:21:34,235 - distributed.scheduler - INFO - Registering Worker plugin shuffle +2025-03-12 04:21:34,278 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:43521'. Reason: failure-to-start- +2025-03-12 04:21:34,278 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:43521' closed. +2025-03-12 04:21:34,278 - distributed.nanny - INFO - Closing Nanny at 'tls://127.0.0.1:44343'. Reason: failure-to-start- +2025-03-12 04:21:34,278 - distributed.nanny - INFO - Nanny at 'tls://127.0.0.1:44343' closed. +2025-03-12 04:21:34,279 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34081': TLS handshake failed with remote 'tls://127.0.0.1:52622': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:34,279 - distributed.comm.tcp - WARNING - Listener on 'tls://127.0.0.1:34081': TLS handshake failed with remote 'tls://127.0.0.1:52630': [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1029) +2025-03-12 04:21:34,279 - distributed.utils_test - ERROR - Failed to start gen_cluster: TimeoutError: Nanny start timed out after 0s.; retrying Traceback (most recent call last): File "/build/reproducible-path/dask.distributed-2024.12.1+ds/.pybuild/cpython3_3.13_distributed/build/distributed/utils.py", line 1914, in wait_for return await fut @@ -14149,17 +15243,14 @@ ) from exc TimeoutError: Nanny start timed out after 0s. 
============================= slowest 20 durations ============================= -60.01s call distributed/tests/test_tls_functional.py::test_retire_workers -3.35s call distributed/tests/test_tls_functional.py::test_nanny +60.03s call distributed/tests/test_tls_functional.py::test_nanny 0.00s setup distributed/tests/test_tls_functional.py::test_nanny -0.00s teardown distributed/tests/test_tls_functional.py::test_retire_workers 0.00s teardown distributed/tests/test_tls_functional.py::test_nanny -0.00s setup distributed/tests/test_tls_functional.py::test_retire_workers =========================== short test summary info ============================ -FAILED distributed/tests/test_tls_functional.py::test_retire_workers - TimeoutError -==================== 1 failed, 1 passed in 64.00s (0:01:04) ==================== -*** END OF RUN 2: NOT ALL TESTS HAVE YET PASSED/XFAILED *** -*** STARTING RUN 3: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file 
--deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers --deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop 
--deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error --deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak 
--deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send --deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names 
--deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources --deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config --deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task +FAILED distributed/tests/test_tls_functional.py::test_nanny - TimeoutError +========================= 1 failed in 62.03s (0:01:02) ========================= +*** END OF RUN 3: NOT ALL TESTS HAVE YET PASSED/XFAILED *** +*** STARTING RUN 4: python3.13 -m pytest --pyargs distributed --verbose --color=no --timeout-method=signal --timeout=300 -m not avoid_ci -rfE --last-failed --last-failed-no-failures none --ignore=distributed/comm/tests/test_comms.py --ignore=distributed/comm/tests/test_ws.py --ignore=distributed/deploy/tests/test_adaptive.py --ignore=distributed/deploy/tests/test_local.py --ignore=distributed/deploy/tests/test_slow_adaptive.py --ignore=distributed/deploy/tests/test_spec_cluster.py --deselect=distributed/cli/tests/test_dask_scheduler.py::test_no_dashboard --deselect=distributed/deploy/tests/test_local.py::test_localcluster_get_client --deselect=distributed/deploy/tests/test_old_ssh.py::test_cluster --deselect=distributed/deploy/tests/test_old_ssh.py::test_old_ssh_nprocs_renamed_to_n_workers --deselect=distributed/deploy/tests/test_old_ssh.py::test_nprocs_attribute_is_deprecated --deselect=distributed/deploy/tests/test_ssh.py::test_nprocs_attribute_is_deprecated 
--deselect=distributed/http/tests/test_core.py::test_prometheus_api_doc --deselect=distributed/tests/test_init.py::test_git_revision --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout_returned --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_jupyter.py::test_shutsdown_cleanly --deselect=distributed/tests/test_profile.py::test_stack_overflow --deselect=distributed/tests/test_pubsub.py::test_client_worker --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_spill.py::test_spillbuffer_oserror --deselect=distributed/tests/test_steal.py::test_steal_twice --deselect=distributed/tests/test_utils_test.py::test_cluster --deselect=distributed/tests/test_variable.py::test_variable_in_task --deselect=distributed/tests/test_worker.py::test_process_executor_kills_process --deselect=distributed/tests/test_worker_memory.py::test_fail_to_pickle_execute_1 --deselect=distributed/tests/test_worker_state_machine.py::test_task_state_instance_are_garbage_collected --deselect=distributed/protocol/tests/test_protocol.py::test_deeply_nested_structures --deselect=distributed/protocol/tests/test_serialize.py::test_deeply_nested_structures --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_spec.py::test_errors --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/dashboard/tests/test_scheduler_bokeh.py::test_counters --deselect=distributed/dashboard/tests/test_worker_bokeh.py::test_counters --deselect=distributed/deploy/tests/test_local.py::test_adapt_then_manual --deselect=distributed/deploy/tests/test_local.py::test_async_with --deselect=distributed/deploy/tests/test_local.py::test_close_twice --deselect=distributed/deploy/tests/test_local.py::test_cluster_info_sync --deselect=distributed/deploy/tests/test_local.py::test_local_tls --deselect=distributed/deploy/tests/test_local.py::test_no_dangling_asyncio_tasks --deselect=distributed/deploy/tests/test_local.py::test_only_local_access --deselect=distributed/deploy/tests/test_local.py::test_remote_access --deselect=distributed/diagnostics/tests/test_progress_widgets.py::test_serializers --deselect=distributed/diagnostics/tests/test_scheduler_plugin.py::test_lifecycle --deselect=distributed/http/scheduler/tests/test_missing_bokeh.py::test_missing_bokeh --deselect=distributed/http/scheduler/tests/test_scheduler_http.py::test_metrics_when_prometheus_client_not_installed --deselect=distributed/protocol/tests/test_serialize.py::test_errors --deselect=distributed/tests/test_batched.py::test_BatchedSend --deselect=distributed/tests/test_batched.py::test_close_closed --deselect=distributed/tests/test_batched.py::test_close_twice --deselect=distributed/tests/test_batched.py::test_send_after_stream_start --deselect=distributed/tests/test_batched.py::test_send_before_close --deselect=distributed/tests/test_batched.py::test_send_before_start --deselect=distributed/tests/test_batched.py::test_sending_traffic_jam --deselect=distributed/tests/test_batched.py::test_serializers --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader 
--deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::TestClientSecurityLoader::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_async_with --deselect=distributed/tests/test_client.py::test_client_is_quiet_cluster_close --deselect=distributed/tests/test_client.py::test_dashboard_link_cluster --deselect=distributed/tests/test_client.py::test_dashboard_link_inproc --deselect=distributed/tests/test_client.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_client.py::test_mixing_clients_different_scheduler --deselect=distributed/tests/test_client.py::test_quiet_client_close --deselect=distributed/tests/test_client.py::test_rebalance_sync --deselect=distributed/tests/test_client.py::test_repr_localcluster --deselect=distributed/tests/test_client.py::test_security_loader --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_explicit_security_provided --deselect=distributed/tests/test_client.py::test_security_loader_ignored_if_returns_none --deselect=distributed/tests/test_client.py::test_shutdown --deselect=distributed/tests/test_client.py::test_shutdown_is_quiet_with_cluster --deselect=distributed/tests/test_client.py::test_shutdown_localcluster --deselect=distributed/tests/test_client.py::test_shutdown_stops_callbacks --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_start_new_loop --deselect=distributed/tests/test_client_loop.py::test_close_loop_sync_use_running_loop --deselect=distributed/tests/test_core.py::test_close_fast_without_active_handlers --deselect=distributed/tests/test_core.py::test_close_grace_period_for_handlers --deselect=distributed/tests/test_core.py::test_close_properly --deselect=distributed/tests/test_core.py::test_compression --deselect=distributed/tests/test_core.py::test_connection_pool --deselect=distributed/tests/test_core.py::test_connection_pool_close_while_connecting --deselect=distributed/tests/test_core.py::test_connection_pool_detects_remote_close --deselect=distributed/tests/test_core.py::test_connection_pool_outside_cancellation --deselect=distributed/tests/test_core.py::test_connection_pool_remove --deselect=distributed/tests/test_core.py::test_connection_pool_respects_limit --deselect=distributed/tests/test_core.py::test_connection_pool_tls --deselect=distributed/tests/test_core.py::test_counters --deselect=distributed/tests/test_core.py::test_deserialize_error --deselect=distributed/tests/test_core.py::test_errors --deselect=distributed/tests/test_core.py::test_identity_inproc --deselect=distributed/tests/test_core.py::test_identity_tcp --deselect=distributed/tests/test_core.py::test_large_packets_inproc --deselect=distributed/tests/test_core.py::test_messages_are_ordered_bsend --deselect=distributed/tests/test_core.py::test_messages_are_ordered_raw --deselect=distributed/tests/test_core.py::test_ports --deselect=distributed/tests/test_core.py::test_rpc_default --deselect=distributed/tests/test_core.py::test_rpc_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_default --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_inproc --deselect=distributed/tests/test_core.py::test_rpc_message_lifetime_tcp --deselect=distributed/tests/test_core.py::test_rpc_serialization --deselect=distributed/tests/test_core.py::test_rpc_tcp --deselect=distributed/tests/test_core.py::test_rpc_tls 
--deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_inproc --deselect=distributed/tests/test_core.py::test_rpc_with_many_connections_tcp --deselect=distributed/tests/test_core.py::test_send_recv_args --deselect=distributed/tests/test_core.py::test_send_recv_cancelled --deselect=distributed/tests/test_core.py::test_server --deselect=distributed/tests/test_core.py::test_server_comms_mark_active_handlers --deselect=distributed/tests/test_core.py::test_server_raises_on_blocked_handlers --deselect=distributed/tests/test_jupyter.py::test_jupyter_idle_timeout --deselect=distributed/tests/test_jupyter.py::test_jupyter_server --deselect=distributed/tests/test_locks.py::test_errors --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_scheduler_file --deselect=distributed/tests/test_nanny.py::test_worker_uses_same_host_as_nanny --deselect=distributed/tests/test_preload.py::test_failure_doesnt_crash_scheduler --deselect=distributed/tests/test_preload.py::test_preload_import_time --deselect=distributed/tests/test_preload.py::test_preload_manager_sequence --deselect=distributed/tests/test_preload.py::test_worker_preload_text --deselect=distributed/tests/test_scheduler.py::test_allowed_failures_config --deselect=distributed/tests/test_scheduler.py::test_async_context_manager --deselect=distributed/tests/test_scheduler.py::test_dashboard_host --deselect=distributed/tests/test_scheduler.py::test_file_descriptors_dont_leak --deselect=distributed/tests/test_scheduler.py::test_finished --deselect=distributed/tests/test_scheduler.py::test_multiple_listeners --deselect=distributed/tests/test_scheduler.py::test_no_dangling_asyncio_tasks --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_scheduler.py::test_scheduler_file --deselect=distributed/tests/test_security.py::test_require_encryption --deselect=distributed/tests/test_security.py::test_tls_listen_connect --deselect=distributed/tests/test_security.py::test_tls_temporary_credentials_functional --deselect=distributed/tests/test_semaphore.py::test_threadpoolworkers_pick_correct_ioloop --deselect=distributed/tests/test_tls_functional.py::test_security_dict_input_no_security --deselect=distributed/tests/test_utils_test.py::test_ensure_no_new_clients --deselect=distributed/tests/test_utils_test.py::test_freeze_batched_send --deselect=distributed/tests/test_utils_test.py::test_locked_comm_drop_in_replacement --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_read --deselect=distributed/tests/test_utils_test.py::test_locked_comm_intercept_write --deselect=distributed/tests/test_worker.py::test_host_uses_scheduler_protocol --deselect=distributed/tests/test_worker.py::test_plugin_exception --deselect=distributed/tests/test_worker.py::test_plugin_internal_exception --deselect=distributed/tests/test_worker.py::test_plugin_multiple_exceptions --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker.py::test_scheduler_file --deselect=distributed/tests/test_worker_client.py::test_dont_override_default_get --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_allowlist 
--deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_scheduler.py::test_dashboard_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_defaults --deselect=distributed/cli/tests/test_dask_scheduler.py::test_hostport --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_protocols --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers --deselect=distributed/cli/tests/test_dask_scheduler.py::test_multiple_workers_2 --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_command_default --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_config --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_file --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_preload_remote_module --deselect=distributed/cli/tests/test_dask_scheduler.py::test_scheduler_port_zero --deselect=distributed/cli/tests/test_dask_scheduler.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_contact_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_dashboard_non_standard_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_error_during_startup --deselect=distributed/cli/tests/test_dask_worker.py::test_integer_names --deselect=distributed/cli/tests/test_dask_worker.py::test_listen_address_ipv6 --deselect=distributed/cli/tests/test_dask_worker.py::test_local_directory --deselect=distributed/cli/tests/test_dask_worker.py::test_memory_limit --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_port_range_too_many_workers_raises --deselect=distributed/cli/tests/test_dask_worker.py::test_nanny_worker_ports --deselect=distributed/cli/tests/test_dask_worker.py::test_no_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_auto --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_expands_name --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_negative --deselect=distributed/cli/tests/test_dask_worker.py::test_nworkers_requires_nanny --deselect=distributed/cli/tests/test_dask_worker.py::test_preload_config --deselect=distributed/cli/tests/test_dask_worker.py::test_resources --deselect=distributed/cli/tests/test_dask_worker.py::test_respect_host_listen_address --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_address_env --deselect=distributed/cli/tests/test_dask_worker.py::test_scheduler_file --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_restart_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_set_lifetime_stagger_via_env_var --deselect=distributed/cli/tests/test_dask_worker.py::test_signal_handling --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_deprecated --deselect=distributed/cli/tests/test_dask_worker.py::test_single_executable_works --deselect=distributed/cli/tests/test_dask_worker.py::test_timeout --deselect=distributed/cli/tests/test_dask_worker.py::test_worker_class --deselect=distributed/tests/test_config.py::test_logging_extended --deselect=distributed/tests/test_config.py::test_logging_file_config 
--deselect=distributed/tests/test_config.py::test_logging_mutual_exclusive --deselect=distributed/tests/test_config.py::test_logging_simple --deselect=distributed/tests/test_config.py::test_logging_simple_under_distributed --deselect=distributed/tests/test_queues.py::test_queue_in_task --deselect=distributed/tests/test_variable.py::test_variable_in_task
============================= test session starts ==============================
platform linux -- Python 3.13.2, pytest-8.3.4, pluggy-1.5.0 -- /usr/bin/python3.13
cachedir: .pytest_cache
@@ -14172,14 +15263,14 @@
collecting ... collected 1 item
run-last-failure: rerun previous 1 failure (skipped 148 files)
-distributed/tests/test_tls_functional.py::test_retire_workers PASSED [100%]
+distributed/tests/test_tls_functional.py::test_nanny PASSED [100%]
============================= slowest 20 durations =============================
-4.66s call distributed/tests/test_tls_functional.py::test_retire_workers
-0.00s setup distributed/tests/test_tls_functional.py::test_retire_workers
-0.00s teardown distributed/tests/test_tls_functional.py::test_retire_workers
-============================== 1 passed in 4.85s ===============================
-*** END OF RUN 3: ALL TESTS RUN HAVE NOW PASSED/XFAILED ***
+5.02s call distributed/tests/test_tls_functional.py::test_nanny
+0.00s setup distributed/tests/test_tls_functional.py::test_nanny
+0.00s teardown distributed/tests/test_tls_functional.py::test_nanny
+============================== 1 passed in 5.32s ===============================
+*** END OF RUN 4: ALL TESTS RUN HAVE NOW PASSED/XFAILED ***
create-stamp debian/debhelper-build-stamp
dh_testroot -O--buildsystem=pybuild
dh_prep -O--buildsystem=pybuild
@@ -14220,8 +15311,8 @@
dh_gencontrol -O--buildsystem=pybuild
dh_md5sums -O--buildsystem=pybuild
dh_builddeb -O--buildsystem=pybuild
-dpkg-deb: building package 'python-distributed-doc' in '../python-distributed-doc_2024.12.1+ds-1_all.deb'.
dpkg-deb: building package 'python3-distributed' in '../python3-distributed_2024.12.1+ds-1_all.deb'.
+dpkg-deb: building package 'python-distributed-doc' in '../python-distributed-doc_2024.12.1+ds-1_all.deb'.
dpkg-genbuildinfo --build=binary -O../dask.distributed_2024.12.1+ds-1_arm64.buildinfo
dpkg-genchanges --build=binary -O../dask.distributed_2024.12.1+ds-1_arm64.changes
dpkg-genchanges: info: binary-only upload (no source code included)
@@ -14230,12 +15321,14 @@
dpkg-buildpackage: info: binary-only upload (no source included)
dpkg-genchanges: info: including full source code in upload
I: copying local configuration
+I: user script /srv/workspace/pbuilder/130149/tmp/hooks/B01_cleanup starting
+I: user script /srv/workspace/pbuilder/130149/tmp/hooks/B01_cleanup finished
I: unmounting dev/ptmx filesystem
I: unmounting dev/pts filesystem
I: unmounting dev/shm filesystem
I: unmounting proc filesystem
I: unmounting sys filesystem
I: cleaning the build env
-I: removing directory /srv/workspace/pbuilder/367956 and its subdirectories
-I: Current time: Mon Apr 13 07:33:33 -12 2026
-I: pbuilder-time-stamp: 1776108813
+I: removing directory /srv/workspace/pbuilder/130149 and its subdirectories
+I: Current time: Wed Mar 12 04:22:58 +14 2025
+I: pbuilder-time-stamp: 1741702978