Skip to content

Commit 26c14f2

Browse files
Truncate long job-derived operation names across wait flows
Co-authored-by: Shri Sukhani <shrisukhani@users.noreply.github.com>
1 parent f1567a1 commit 26c14f2

22 files changed: 114 additions & 50 deletions

hyperbrowser/client/managers/async_manager/agents/browser_use.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import Optional
22

33
from hyperbrowser.exceptions import HyperbrowserError
4-
from ....polling import wait_for_job_result_async
4+
from ....polling import build_operation_name, wait_for_job_result_async
55
from ....schema_utils import resolve_schema_input
66

77
from .....models import (
@@ -61,9 +61,10 @@ async def start_and_wait(
6161
job_id = job_start_resp.job_id
6262
if not job_id:
6363
raise HyperbrowserError("Failed to start browser-use task job")
64+
operation_name = build_operation_name("browser-use task job ", job_id)
6465

6566
return await wait_for_job_result_async(
66-
operation_name=f"browser-use task job {job_id}",
67+
operation_name=operation_name,
6768
get_status=lambda: self.get_status(job_id).status,
6869
is_terminal_status=lambda status: (
6970
status in {"completed", "failed", "stopped"}

hyperbrowser/client/managers/async_manager/agents/claude_computer_use.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import Optional
22

33
from hyperbrowser.exceptions import HyperbrowserError
4-
from ....polling import wait_for_job_result_async
4+
from ....polling import build_operation_name, wait_for_job_result_async
55

66
from .....models import (
77
POLLING_ATTEMPTS,
@@ -55,9 +55,10 @@ async def start_and_wait(
5555
job_id = job_start_resp.job_id
5656
if not job_id:
5757
raise HyperbrowserError("Failed to start Claude Computer Use task job")
58+
operation_name = build_operation_name("Claude Computer Use task job ", job_id)
5859

5960
return await wait_for_job_result_async(
60-
operation_name=f"Claude Computer Use task job {job_id}",
61+
operation_name=operation_name,
6162
get_status=lambda: self.get_status(job_id).status,
6263
is_terminal_status=lambda status: (
6364
status in {"completed", "failed", "stopped"}

hyperbrowser/client/managers/async_manager/agents/cua.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import Optional
22

33
from hyperbrowser.exceptions import HyperbrowserError
4-
from ....polling import wait_for_job_result_async
4+
from ....polling import build_operation_name, wait_for_job_result_async
55

66
from .....models import (
77
POLLING_ATTEMPTS,
@@ -53,9 +53,10 @@ async def start_and_wait(
5353
job_id = job_start_resp.job_id
5454
if not job_id:
5555
raise HyperbrowserError("Failed to start CUA task job")
56+
operation_name = build_operation_name("CUA task job ", job_id)
5657

5758
return await wait_for_job_result_async(
58-
operation_name=f"CUA task job {job_id}",
59+
operation_name=operation_name,
5960
get_status=lambda: self.get_status(job_id).status,
6061
is_terminal_status=lambda status: (
6162
status in {"completed", "failed", "stopped"}

hyperbrowser/client/managers/async_manager/agents/gemini_computer_use.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import Optional
22

33
from hyperbrowser.exceptions import HyperbrowserError
4-
from ....polling import wait_for_job_result_async
4+
from ....polling import build_operation_name, wait_for_job_result_async
55

66
from .....models import (
77
POLLING_ATTEMPTS,
@@ -55,9 +55,10 @@ async def start_and_wait(
5555
job_id = job_start_resp.job_id
5656
if not job_id:
5757
raise HyperbrowserError("Failed to start Gemini Computer Use task job")
58+
operation_name = build_operation_name("Gemini Computer Use task job ", job_id)
5859

5960
return await wait_for_job_result_async(
60-
operation_name=f"Gemini Computer Use task job {job_id}",
61+
operation_name=operation_name,
6162
get_status=lambda: self.get_status(job_id).status,
6263
is_terminal_status=lambda status: (
6364
status in {"completed", "failed", "stopped"}

hyperbrowser/client/managers/async_manager/agents/hyper_agent.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
from typing import Optional
22

33
from hyperbrowser.exceptions import HyperbrowserError
4-
from ....polling import wait_for_job_result_async
4+
from ....polling import build_operation_name, wait_for_job_result_async
55

66
from .....models import (
77
POLLING_ATTEMPTS,
@@ -55,9 +55,10 @@ async def start_and_wait(
5555
job_id = job_start_resp.job_id
5656
if not job_id:
5757
raise HyperbrowserError("Failed to start HyperAgent task")
58+
operation_name = build_operation_name("HyperAgent task ", job_id)
5859

5960
return await wait_for_job_result_async(
60-
operation_name=f"HyperAgent task {job_id}",
61+
operation_name=operation_name,
6162
get_status=lambda: self.get_status(job_id).status,
6263
is_terminal_status=lambda status: (
6364
status in {"completed", "failed", "stopped"}

hyperbrowser/client/managers/async_manager/crawl.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from hyperbrowser.models.consts import POLLING_ATTEMPTS
44
from ...polling import (
55
build_fetch_operation_name,
6+
build_operation_name,
67
collect_paginated_results_async,
78
poll_until_terminal_status_async,
89
retry_operation_async,
@@ -56,9 +57,10 @@ async def start_and_wait(
5657
job_id = job_start_resp.job_id
5758
if not job_id:
5859
raise HyperbrowserError("Failed to start crawl job")
60+
operation_name = build_operation_name("crawl job ", job_id)
5961

6062
job_status = await poll_until_terminal_status_async(
61-
operation_name=f"crawl job {job_id}",
63+
operation_name=operation_name,
6264
get_status=lambda: self.get_status(job_id).status,
6365
is_terminal_status=lambda status: status in {"completed", "failed"},
6466
poll_interval_seconds=poll_interval_seconds,
@@ -68,7 +70,7 @@ async def start_and_wait(
6870

6971
if not return_all_pages:
7072
return await retry_operation_async(
71-
operation_name=build_fetch_operation_name(f"crawl job {job_id}"),
73+
operation_name=build_fetch_operation_name(operation_name),
7274
operation=lambda: self.get(job_id),
7375
max_attempts=POLLING_ATTEMPTS,
7476
retry_delay_seconds=0.5,
@@ -94,7 +96,7 @@ def merge_page_response(page_response: CrawlJobResponse) -> None:
9496
job_response.error = page_response.error
9597

9698
await collect_paginated_results_async(
97-
operation_name=f"crawl job {job_id}",
99+
operation_name=operation_name,
98100
get_next_page=lambda page: self.get(
99101
job_start_resp.job_id,
100102
GetCrawlJobParams(page=page, batch_size=100),

hyperbrowser/client/managers/async_manager/extract.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
StartExtractJobParams,
99
StartExtractJobResponse,
1010
)
11-
from ...polling import wait_for_job_result_async
11+
from ...polling import build_operation_name, wait_for_job_result_async
1212
from ...schema_utils import resolve_schema_input
1313

1414

@@ -53,9 +53,10 @@ async def start_and_wait(
5353
job_id = job_start_resp.job_id
5454
if not job_id:
5555
raise HyperbrowserError("Failed to start extract job")
56+
operation_name = build_operation_name("extract job ", job_id)
5657

5758
return await wait_for_job_result_async(
58-
operation_name=f"extract job {job_id}",
59+
operation_name=operation_name,
5960
get_status=lambda: self.get_status(job_id).status,
6061
is_terminal_status=lambda status: status in {"completed", "failed"},
6162
fetch_result=lambda: self.get(job_id),

hyperbrowser/client/managers/async_manager/scrape.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from hyperbrowser.models.consts import POLLING_ATTEMPTS
44
from ...polling import (
55
build_fetch_operation_name,
6+
build_operation_name,
67
collect_paginated_results_async,
78
poll_until_terminal_status_async,
89
retry_operation_async,
@@ -63,9 +64,10 @@ async def start_and_wait(
6364
job_id = job_start_resp.job_id
6465
if not job_id:
6566
raise HyperbrowserError("Failed to start batch scrape job")
67+
operation_name = build_operation_name("batch scrape job ", job_id)
6668

6769
job_status = await poll_until_terminal_status_async(
68-
operation_name=f"batch scrape job {job_id}",
70+
operation_name=operation_name,
6971
get_status=lambda: self.get_status(job_id).status,
7072
is_terminal_status=lambda status: status in {"completed", "failed"},
7173
poll_interval_seconds=poll_interval_seconds,
@@ -75,7 +77,7 @@ async def start_and_wait(
7577

7678
if not return_all_pages:
7779
return await retry_operation_async(
78-
operation_name=build_fetch_operation_name(f"batch scrape job {job_id}"),
80+
operation_name=build_fetch_operation_name(operation_name),
7981
operation=lambda: self.get(job_id),
8082
max_attempts=POLLING_ATTEMPTS,
8183
retry_delay_seconds=0.5,
@@ -101,7 +103,7 @@ def merge_page_response(page_response: BatchScrapeJobResponse) -> None:
101103
job_response.error = page_response.error
102104

103105
await collect_paginated_results_async(
104-
operation_name=f"batch scrape job {job_id}",
106+
operation_name=operation_name,
105107
get_next_page=lambda page: self.get(
106108
job_id,
107109
params=GetBatchScrapeJobParams(page=page, batch_size=100),
@@ -156,9 +158,10 @@ async def start_and_wait(
156158
job_id = job_start_resp.job_id
157159
if not job_id:
158160
raise HyperbrowserError("Failed to start scrape job")
161+
operation_name = build_operation_name("scrape job ", job_id)
159162

160163
return await wait_for_job_result_async(
161-
operation_name=f"scrape job {job_id}",
164+
operation_name=operation_name,
162165
get_status=lambda: self.get_status(job_id).status,
163166
is_terminal_status=lambda status: status in {"completed", "failed"},
164167
fetch_result=lambda: self.get(job_id),

hyperbrowser/client/managers/async_manager/web/batch_fetch.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from hyperbrowser.exceptions import HyperbrowserError
1212
from ....polling import (
1313
build_fetch_operation_name,
14+
build_operation_name,
1415
collect_paginated_results_async,
1516
poll_until_terminal_status_async,
1617
retry_operation_async,
@@ -64,9 +65,10 @@ async def start_and_wait(
6465
job_id = job_start_resp.job_id
6566
if not job_id:
6667
raise HyperbrowserError("Failed to start batch fetch job")
68+
operation_name = build_operation_name("batch fetch job ", job_id)
6769

6870
job_status = await poll_until_terminal_status_async(
69-
operation_name=f"batch fetch job {job_id}",
71+
operation_name=operation_name,
7072
get_status=lambda: self.get_status(job_id).status,
7173
is_terminal_status=lambda status: status in {"completed", "failed"},
7274
poll_interval_seconds=poll_interval_seconds,
@@ -76,7 +78,7 @@ async def start_and_wait(
7678

7779
if not return_all_pages:
7880
return await retry_operation_async(
79-
operation_name=build_fetch_operation_name(f"batch fetch job {job_id}"),
81+
operation_name=build_fetch_operation_name(operation_name),
8082
operation=lambda: self.get(job_id),
8183
max_attempts=POLLING_ATTEMPTS,
8284
retry_delay_seconds=0.5,
@@ -102,7 +104,7 @@ def merge_page_response(page_response: BatchFetchJobResponse) -> None:
102104
job_response.error = page_response.error
103105

104106
await collect_paginated_results_async(
105-
operation_name=f"batch fetch job {job_id}",
107+
operation_name=operation_name,
106108
get_next_page=lambda page: self.get(
107109
job_id,
108110
params=GetBatchFetchJobParams(page=page, batch_size=100),

hyperbrowser/client/managers/async_manager/web/crawl.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from hyperbrowser.exceptions import HyperbrowserError
1212
from ....polling import (
1313
build_fetch_operation_name,
14+
build_operation_name,
1415
collect_paginated_results_async,
1516
poll_until_terminal_status_async,
1617
retry_operation_async,
@@ -62,9 +63,10 @@ async def start_and_wait(
6263
job_id = job_start_resp.job_id
6364
if not job_id:
6465
raise HyperbrowserError("Failed to start web crawl job")
66+
operation_name = build_operation_name("web crawl job ", job_id)
6567

6668
job_status = await poll_until_terminal_status_async(
67-
operation_name=f"web crawl job {job_id}",
69+
operation_name=operation_name,
6870
get_status=lambda: self.get_status(job_id).status,
6971
is_terminal_status=lambda status: status in {"completed", "failed"},
7072
poll_interval_seconds=poll_interval_seconds,
@@ -74,7 +76,7 @@ async def start_and_wait(
7476

7577
if not return_all_pages:
7678
return await retry_operation_async(
77-
operation_name=build_fetch_operation_name(f"web crawl job {job_id}"),
79+
operation_name=build_fetch_operation_name(operation_name),
7880
operation=lambda: self.get(job_id),
7981
max_attempts=POLLING_ATTEMPTS,
8082
retry_delay_seconds=0.5,
@@ -100,7 +102,7 @@ def merge_page_response(page_response: WebCrawlJobResponse) -> None:
100102
job_response.error = page_response.error
101103

102104
await collect_paginated_results_async(
103-
operation_name=f"web crawl job {job_id}",
105+
operation_name=operation_name,
104106
get_next_page=lambda page: self.get(
105107
job_id,
106108
params=GetWebCrawlJobParams(page=page, batch_size=100),

0 commit comments

Comments (0)