diff --git a/docs/v3/resources/upgrade-to-prefect-3.mdx b/docs/v3/resources/upgrade-to-prefect-3.mdx
index cbbffa5294ca..6336d995d465 100644
--- a/docs/v3/resources/upgrade-to-prefect-3.mdx
+++ b/docs/v3/resources/upgrade-to-prefect-3.mdx
@@ -40,14 +40,15 @@ pip install -U 'prefect[aws]'
 
 ### Pydantic V2
-
-This change affects you if: you use custom Pydantic models with Prefect features.
-
-
 Prefect 3.0 is built with Pydantic 2.0 for improved performance. All Prefect objects will automatically upgrade, but if you use custom Pydantic models for flow parameters or custom blocks, you'll need to ensure they are compatible with Pydantic 2.0. You can continue to use Pydantic 1.0 models in your own code if they do not interact directly with Prefect.
 
 Refer to [Pydantic's migration guide](https://docs.pydantic.dev/latest/migration/) for detailed information on necessary changes.
 
+
+We recommend pausing all deployment schedules prior to upgrading.
+Because differences in Pydantic's datetime handling affect the scheduler's idempotency logic, there is a small risk that the scheduler will duplicate runs during its first loop after the upgrade.
+
+
 ### Module location and name changes
 
 Some less-commonly used modules have been renamed, reorganized, or removed for clarity. The old import paths will continue to be supported for 6 months, but emit deprecation warnings. You can look at the [deprecation code](https://github.com/PrefectHQ/prefect/blob/main/src/prefect/_internal/compatibility/migration.py) to see a full list of affected paths.
 
diff --git a/src/prefect/task_engine.py b/src/prefect/task_engine.py
index c5861fc4bbc1..93729a7f334b 100644
--- a/src/prefect/task_engine.py
+++ b/src/prefect/task_engine.py
@@ -249,7 +249,7 @@ def log_finished_message(self) -> None:
         display_state = repr(self.state) if PREFECT_DEBUG_MODE else str(self.state)
         level = logging.INFO if self.state.is_completed() else logging.ERROR
         msg = f"Finished in state {display_state}"
-        if self.state.is_pending():
+        if self.state.is_pending() and self.state.name != "NotReady":
             msg += (
                 "\nPlease wait for all submitted tasks to complete"
                 " before exiting your flow by calling `.wait()` on the "
diff --git a/tests/test_task_engine.py b/tests/test_task_engine.py
index 384c7fb6c530..979cedfffbb4 100644
--- a/tests/test_task_engine.py
+++ b/tests/test_task_engine.py
@@ -35,7 +35,7 @@
 from prefect.results import ResultRecord, ResultStore
 from prefect.server.schemas.core import ConcurrencyLimitV2
 from prefect.settings import PREFECT_TASK_DEFAULT_RETRIES, temporary_settings
-from prefect.states import Completed, Running, State
+from prefect.states import Completed, Pending, Running, State
 from prefect.task_engine import (
     AsyncTaskRunEngine,
     SyncTaskRunEngine,
@@ -101,6 +101,37 @@ async def test_set_task_run_state_duplicated_timestamp(self):
         assert new_state == completed_state
         assert new_state.timestamp > running_state.timestamp
 
+    def test_logs_message_when_submitted_tasks_end_in_pending(self, caplog):
+        """
+        If submitted tasks aren't waited on before a flow exits, they may fail to run
+        because their transition from PENDING to RUNNING is denied. This test ensures
+        that a message is logged when this happens.
+        """
+        engine = SyncTaskRunEngine(task=foo)
+        with engine.initialize_run():
+            assert engine.state.is_pending()
+
+        assert (
+            "Please wait for all submitted tasks to complete before exiting your flow"
+            in caplog.text
+        )
+
+    def test_doesnt_log_message_when_submitted_tasks_end_in_not_ready(self, caplog):
+        """
+        Regression test for tasks that didn't run because of upstream issues, not
+        because of a missing `.wait()` call. See https://github.com/PrefectHQ/prefect/issues/16848
+        """
+
+        engine = SyncTaskRunEngine(task=foo)
+        with engine.initialize_run():
+            assert engine.state.is_pending()
+            engine.set_state(Pending(name="NotReady"))
+
+        assert (
+            "Please wait for all submitted tasks to complete before exiting your flow"
+            not in caplog.text
+        )
+
 
 class TestAsyncTaskRunEngine:
     async def test_basic_init(self):
diff --git a/tests/test_tasks.py b/tests/test_tasks.py
index 0377845b9481..dc9800def3bc 100644
--- a/tests/test_tasks.py
+++ b/tests/test_tasks.py
@@ -910,35 +910,6 @@ def my_flow():
         with pytest.raises(ValueError, match="deadlock"):
             my_flow()
 
-    @pytest.mark.skip(
-        reason="This test is not compatible with the current state of client side task orchestration"
-    )
-    def test_logs_message_when_submitted_tasks_end_in_pending(self, caplog):
-        """
-        If submitted tasks aren't waited on before a flow exits, they may fail to run
-        because they're transition from PENDING to RUNNING is denied. This test ensures
-        that a message is logged when this happens.
-        """
-
-        @task
-        def find_palindromes():
-            """This is a computationally expensive task that never ends,
-            allowing the flow to exit before the task is completed."""
-            num = 10
-            while True:
-                _ = str(num) == str(num)[::-1]
-                num += 1
-
-        @flow
-        def test_flow():
-            find_palindromes.submit()
-
-        test_flow()
-        assert (
-            "Please wait for all submitted tasks to complete before exiting your flow"
-            in caplog.text
-        )
-
 
 class TestTaskStates:
     @pytest.mark.parametrize("error", [ValueError("Hello"), None])
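
For context, here is a minimal sketch of the condition this patch adds to `log_finished_message`, assuming a Prefect 3.x environment. The helper `should_log_wait_reminder` is illustrative only and does not exist in the Prefect codebase; it simply mirrors the patched guard.

```python
from prefect.states import Pending, State


def should_log_wait_reminder(state: State) -> bool:
    # Mirrors the patched condition: remind users to call `.wait()` only for
    # ordinary pending tasks, not for tasks that never became ready to run
    # (those carry a Pending state named "NotReady").
    return state.is_pending() and state.name != "NotReady"


# A default Pending state (named "Pending") still triggers the reminder...
assert should_log_wait_reminder(Pending())
# ...while the "NotReady" state used for tasks blocked upstream does not.
assert not should_log_wait_reminder(Pending(name="NotReady"))
```

The guard keys off the state name rather than a new state type, so ordinary `Pending` handling elsewhere is unchanged; `"NotReady"` is the pending state the new regression test associates with tasks blocked by upstream failures (see https://github.com/PrefectHQ/prefect/issues/16848).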