Bug 1896126 - Update taskcluster-taskgraph vendor to 8.1.0, r=taskgraph-reviewers,mach-reviewers,gabriel

Differential Revision: https://phabricator.services.mozilla.com/D210054
Andrew Halberstadt 2024-05-14 13:31:13 +00:00
parent b72e1816e8
commit 5e12349e05
20 changed files with 237 additions and 28 deletions


@@ -0,0 +1,68 @@
Metadata-Version: 2.1
Name: dlmanager
Version: 0.1.1
Summary: download manager library
Home-page: http://github.com/parkouss/dlmanager
Author: Julien Pagès
Author-email: j.parkouss@gmail.com
License: GPL/LGPL
.. image:: https://badge.fury.io/py/dlmanager.svg
    :target: https://pypi.python.org/pypi/dlmanager

.. image:: https://readthedocs.org/projects/dlmanager/badge/?version=latest
    :target: http://dlmanager.readthedocs.org/en/latest/?badge=latest
    :alt: Documentation Status

.. image:: https://travis-ci.org/parkouss/dlmanager.svg?branch=master
    :target: https://travis-ci.org/parkouss/dlmanager

.. image:: https://codecov.io/github/parkouss/dlmanager/coverage.svg?branch=master
    :target: https://codecov.io/github/parkouss/dlmanager?branch=master

dlmanager
=========
**dlmanager** is a Python 2 and 3 download manager library, with the following
features:

- Download files in the background and in parallel
- Cancel downloads
- Store downloads in a given directory, avoiding re-downloading files
- Limit the size of this directory, removing the oldest files
Example
-------
.. code-block:: python

    from dlmanager import DownloadManager, PersistLimit

    manager = DownloadManager(
        "dlmanager-destdir",
        persist_limit=PersistLimit(
            size_limit=1073741824,  # 1 GB max
            file_limit=10,  # force to keep 10 files even if size_limit is reached
        )
    )

    # Start downloads in background
    # Note that if files are already present, this is a no-op.
    manager.download(url1)
    manager.download(url2)

    # Wait for completion
    try:
        manager.wait()
    except BaseException:
        manager.cancel()
        raise
Installation
------------

Use pip::

    pip install -U dlmanager


@@ -0,0 +1,14 @@
README.rst
setup.cfg
setup.py
dlmanager/__init__.py
dlmanager/fs.py
dlmanager/manager.py
dlmanager/persist_limit.py
dlmanager.egg-info/PKG-INFO
dlmanager.egg-info/SOURCES.txt
dlmanager.egg-info/dependency_links.txt
dlmanager.egg-info/requires.txt
dlmanager.egg-info/top_level.txt
tests/test_manager.py
tests/test_persist_limit.py


@@ -0,0 +1 @@


@@ -0,0 +1,2 @@
requests
six


@@ -0,0 +1 @@
dlmanager

third_party/python/poetry.lock

@@ -1375,14 +1375,14 @@ test = ["aiofiles", "coverage", "flake8", "httmock", "httptest", "hypothesis", "
[[package]]
name = "taskcluster-taskgraph"
version = "8.0.1"
version = "8.1.0"
description = "Build taskcluster taskgraphs"
category = "main"
optional = false
python-versions = "*"
files = [
{file = "taskcluster-taskgraph-8.0.1.tar.gz", hash = "sha256:21387537bbebab2a7b1890d03e20e49379bdda65efd45ca7fb8d01f5c29e1797"},
{file = "taskcluster_taskgraph-8.0.1-py3-none-any.whl", hash = "sha256:14500bc703f64eb002c0cd505caaf2d34ffc0ae66d109b108e738661da1ae09c"},
{file = "taskcluster-taskgraph-8.1.0.tar.gz", hash = "sha256:192dcd9fab32964cd5af7b637ef2f78e55067adb234fec102525d294ba85a976"},
{file = "taskcluster_taskgraph-8.1.0-py3-none-any.whl", hash = "sha256:38d0ed19a352df204b838364532ab535d0bded944752a7ca7ad9c5049fc270dc"},
]
[package.dependencies]
@@ -1624,4 +1624,4 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=4.6)", "pytest-black (
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
content-hash = "8e72dc9ba9b4f08d27d90f99666459a814d1bb293c68de222614ea57db5b70ef"
content-hash = "5c0785aade9b3c4260f10610b2a2d67196e9931ea06f90ef9568fe032ee7fad0"


@@ -53,7 +53,7 @@ setuptools==68.0.0
six==1.16.0
slugid==2.0.0
taskcluster==44.2.2
taskcluster-taskgraph==8.0.1
taskcluster-taskgraph==8.1.0
taskcluster-urls==13.0.1
toml==0.10.2
tomlkit==0.12.3


@@ -539,9 +539,9 @@ six==1.16.0 ; python_version >= "3.8" and python_version < "4.0" \
slugid==2.0.0 ; python_version >= "3.8" and python_version < "4.0" \
--hash=sha256:a950d98b72691178bdd4d6c52743c4a2aa039207cf7a97d71060a111ff9ba297 \
--hash=sha256:aec8b0e01c4ad32e38e12d609eab3ec912fd129aaf6b2ded0199b56a5f8fd67c
taskcluster-taskgraph==8.0.1 ; python_version >= "3.8" and python_version < "4.0" \
--hash=sha256:14500bc703f64eb002c0cd505caaf2d34ffc0ae66d109b108e738661da1ae09c \
--hash=sha256:21387537bbebab2a7b1890d03e20e49379bdda65efd45ca7fb8d01f5c29e1797
taskcluster-taskgraph==8.1.0 ; python_version >= "3.8" and python_version < "4.0" \
--hash=sha256:192dcd9fab32964cd5af7b637ef2f78e55067adb234fec102525d294ba85a976 \
--hash=sha256:38d0ed19a352df204b838364532ab535d0bded944752a7ca7ad9c5049fc270dc
taskcluster-urls==13.0.1 ; python_version >= "3.8" and python_version < "4.0" \
--hash=sha256:5e25e7e6818e8877178b175ff43d2e6548afad72694aa125f404a7329ece0973 \
--hash=sha256:b25e122ecec249c4299ac7b20b08db76e3e2025bdaeb699a9d444556de5fd367 \


@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: taskcluster-taskgraph
Version: 8.0.1
Version: 8.1.0
Summary: Build taskcluster taskgraphs
Home-page: https://github.com/taskcluster/taskgraph
Classifier: Development Status :: 5 - Production/Stable


@@ -1,12 +1,12 @@
taskgraph/__init__.py,sha256=hCl3NLzC-cVXlKhuzf0-_0wd0gYmNA3oshXfTaa9DNQ,729
taskgraph/__init__.py,sha256=mzlRpJHPnQilGBE8iCNWpILt4Ao1T8yf6nHCZcRKfRI,729
taskgraph/config.py,sha256=8vntWUrPwGds22mFKYAgcsD4Mr8hoONTv2ssGBcClLw,5108
taskgraph/create.py,sha256=_zokjSM3ZaO04l2LiMhenE8qXDZVfYvueIIu5hGUhzc,5185
taskgraph/decision.py,sha256=sG0CIj9OSOdfN65LSt6dRYFWbns9_JraVC5fQU1_7oc,13012
taskgraph/decision.py,sha256=gIvVLfMTd6KtnrTFkmFTrky93mknB9dxtL7_aZwEtoA,13088
taskgraph/docker.py,sha256=rk-tAMycHnapFyR2Q-XJXzC2A4uv0i-VykLZfwl-pRo,8417
taskgraph/filter_tasks.py,sha256=R7tYXiaVPGIkQ6O1c9-QJrKZ59m9pFXCloUlPraVnZU,866
taskgraph/generator.py,sha256=zrH1zfy-8akksKTSOf6e4FEsdOd5y7-h1Jne_2Jabcc,15703
taskgraph/graph.py,sha256=bHUsv2pPa2SSaWgBY-ItIj7REPd0o4fFYrwoQbwFKTY,4680
taskgraph/main.py,sha256=tgfAEcNUJfmADteL24yJR5u7tzU4v3mzmxiogVSCK8Y,29072
taskgraph/main.py,sha256=n4p2LAN10Oo2yVv1G-cnWxK0FV2KcB9Q4H5m0K0qmw0,29171
taskgraph/morph.py,sha256=bwkaSGdTZLcK_rhF2st2mCGv9EHN5WdbnDeuZcqp9UA,9208
taskgraph/parameters.py,sha256=hrwUHHu4PS79w-fQ3qNnLSyjRto1EDlidE8e1GzIy8U,12272
taskgraph/target_tasks.py,sha256=9_v66bzmQFELPsfIDGITXrqzsmEiLq1EeuJFhycKL0M,3356
@@ -24,8 +24,8 @@ taskgraph/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
taskgraph/loader/default.py,sha256=_bBJG6l04v44Jm5HSIEnVndC05NpNmq5L28QfJHk0wo,1185
taskgraph/loader/transform.py,sha256=olUBPjxk3eEIg25sduxlcyqhjoig4ts5kPlT_zs6g9g,2147
taskgraph/optimize/__init__.py,sha256=Oqpq1RW8QzOcu7zaMlNQ3BHT9ws9e_93FWfCqzNcQps,123
taskgraph/optimize/base.py,sha256=wTViUwVmY9sZvlzSuGwkVrETCo0v2OfyNxFFgzJrDNc,18982
taskgraph/optimize/strategies.py,sha256=UryFI5TizzEF_2NO8MyuKwqVektHfJeG_t0_zZwxEds,2577
taskgraph/optimize/base.py,sha256=ckr0C2qzYTyp036oDInMDRaGmieAH7t93kOy-1hXPbg,20107
taskgraph/optimize/strategies.py,sha256=bUbcnBIoufWS_BqObI632Hnlu91fXn7PrVhimXsLLOE,2549
taskgraph/run-task/fetch-content,sha256=G1aAvZlTg0yWHqxhSxi4RvfxW-KBJ5JwnGtWRqfH_bg,29990
taskgraph/run-task/hgrc,sha256=BybWLDR89bWi3pE5T05UqmDHs02CbLypE-omLZWU6Uk,896
taskgraph/run-task/robustcheckout.py,sha256=vPKvHb3fIIJli9ZVZG88XYoa8Sohy2JrpmH6pDgBDHI,30813
@@ -62,7 +62,7 @@ taskgraph/util/readonlydict.py,sha256=XzTG-gqGqWVlSkDxSyOL6Ur7Z0ONhIJ9DVLWV3q4q1
taskgraph/util/schema.py,sha256=HmbbJ_i5uxZZHZSJ8sVWaD-VMhZI4ymx0STNcjO5t2M,8260
taskgraph/util/set_name.py,sha256=cha9awo2nMQ9jfSEcbyNkZkCq_1Yg_kKJTfvDzabHSc,1134
taskgraph/util/shell.py,sha256=nf__ly0Ikhj92AiEBCQtvyyckm8UfO_3DSgz0SU-7QA,1321
taskgraph/util/taskcluster.py,sha256=LScpZknMycOOneIcRMf236rCTMRHHGxFTc9Lh7mRKaI,13057
taskgraph/util/taskcluster.py,sha256=-BlQqkxxH5S2BbZ4X2c0lNd1msU2xLM1S5rr8qrLwkE,15961
taskgraph/util/taskgraph.py,sha256=ecKEvTfmLVvEKLPO_0g34CqVvc0iCzuNMh3064BZNrE,1969
taskgraph/util/templates.py,sha256=HGTaIKCpAwEzBDHq0cDai1HJjPJrdnHsjJz6N4LVpKI,2139
taskgraph/util/time.py,sha256=XauJ0DbU0fyFvHLzJLG4ehHv9KaKixxETro89GPC1yk,3350
@@ -71,9 +71,9 @@ taskgraph/util/vcs.py,sha256=FjS82fiTsoQ_ArjTCDOtDGfNdVUp_8zvVKB9SoAG3Rs,18019
taskgraph/util/verify.py,sha256=htrNX7aXMMDzxymsFVcs0kaO5gErFHd62g9cQsZI_WE,8518
taskgraph/util/workertypes.py,sha256=1wgM6vLrlgtyv8854anVIs0Bx11kV8JJJaKcOHJc2j0,2498
taskgraph/util/yaml.py,sha256=-LaIf3RROuaSWckOOGN5Iviu-DHWxIChgHn9a7n6ec4,1059
taskcluster_taskgraph-8.0.1.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
taskcluster_taskgraph-8.0.1.dist-info/METADATA,sha256=qg-m62f4BGLh2jBAr_-OQZhraOSciTrv5EyNY0Wwq8I,4688
taskcluster_taskgraph-8.0.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
taskcluster_taskgraph-8.0.1.dist-info/entry_points.txt,sha256=2hxDzE3qq_sHh-J3ROqwpxgQgxO-196phWAQREl2-XA,50
taskcluster_taskgraph-8.0.1.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10
taskcluster_taskgraph-8.0.1.dist-info/RECORD,,
taskcluster_taskgraph-8.1.0.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
taskcluster_taskgraph-8.1.0.dist-info/METADATA,sha256=k3wwPQzeNRQbVhvjOhAzezuNY0vImdTWbZ2e4uuMabM,4688
taskcluster_taskgraph-8.1.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
taskcluster_taskgraph-8.1.0.dist-info/entry_points.txt,sha256=2hxDzE3qq_sHh-J3ROqwpxgQgxO-196phWAQREl2-XA,50
taskcluster_taskgraph-8.1.0.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10
taskcluster_taskgraph-8.1.0.dist-info/RECORD,,


@@ -2,7 +2,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
__version__ = "8.0.1"
__version__ = "8.1.0"
# Maximum number of dependencies a single task can have
# https://docs.taskcluster.net/reference/platform/taskcluster-queue/references/api#createTask


@@ -74,6 +74,8 @@ def taskgraph_decision(options, parameters=None):
* generating a set of artifacts to memorialize the graph
* calling TaskCluster APIs to create the graph
"""
if options.get("verbose"):
logging.root.setLevel(logging.DEBUG)
parameters = parameters or (
lambda graph_config: get_decision_parameters(graph_config, options)


@@ -697,6 +697,9 @@ def image_digest(args):
"--tasks-for", required=True, help="the tasks_for value used to generate this task"
)
@argument("--try-task-config-file", help="path to try task configuration file")
@argument(
"--verbose", "-v", action="store_true", help="include debug-level logging output"
)
def decision(options):
from taskgraph.decision import taskgraph_decision


@@ -22,6 +22,7 @@ from taskgraph.graph import Graph
from taskgraph.taskgraph import TaskGraph
from taskgraph.util.parameterization import resolve_task_references, resolve_timestamps
from taskgraph.util.python_path import import_sibling_modules
from taskgraph.util.taskcluster import find_task_id_batched, status_task_batched
logger = logging.getLogger(__name__)
registry = {}
@@ -51,6 +52,9 @@ def optimize_task_graph(
Perform task optimization, returning a taskgraph and a map from label to
assigned taskId, including replacement tasks.
"""
# avoid circular import
from taskgraph.optimize.strategies import IndexSearch
label_to_taskid = {}
if not existing_tasks:
existing_tasks = {}
@@ -70,6 +74,23 @@
do_not_optimize=do_not_optimize,
)
# Gather each relevant task's index
indexes = set()
for label in target_task_graph.graph.visit_postorder():
if label in do_not_optimize:
continue
_, strategy, arg = optimizations(label)
if isinstance(strategy, IndexSearch) and arg is not None:
indexes.update(arg)
index_to_taskid = {}
taskid_to_status = {}
if indexes:
# Find their respective status using TC index/queue batch APIs
indexes = list(indexes)
index_to_taskid = find_task_id_batched(indexes)
taskid_to_status = status_task_batched(list(index_to_taskid.values()))
replaced_tasks = replace_tasks(
target_task_graph=target_task_graph,
optimizations=optimizations,
@@ -78,6 +99,8 @@
label_to_taskid=label_to_taskid,
existing_tasks=existing_tasks,
removed_tasks=removed_tasks,
index_to_taskid=index_to_taskid,
taskid_to_status=taskid_to_status,
)
return (
@@ -259,12 +282,17 @@
label_to_taskid,
removed_tasks,
existing_tasks,
index_to_taskid,
taskid_to_status,
):
"""
Implement the "Replacing Tasks" phase, returning a set of task labels of
all replaced tasks. The replacement taskIds are added to label_to_taskid as
a side-effect.
"""
# avoid circular import
from taskgraph.optimize.strategies import IndexSearch
opt_counts = defaultdict(int)
replaced = set()
dependents_of = target_task_graph.graph.reverse_links_dict()
@@ -307,6 +335,10 @@
deadline = max(
resolve_timestamps(now, task.task["deadline"]) for task in dependents
)
if isinstance(opt, IndexSearch):
arg = arg, index_to_taskid, taskid_to_status
repl = opt.should_replace_task(task, params, deadline, arg)
if repl:
if repl is True:
@@ -316,7 +348,7 @@
removed_tasks.add(label)
else:
logger.debug(
f"replace_tasks: {label} replaced by optimization strategy"
f"replace_tasks: {label} replaced with {repl} by optimization strategy"
)
label_to_taskid[label] = repl
replaced.add(label)

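Read together, the new phase in optimize_task_graph amounts to the sketch below (a simplification, not the vendored code itself; it assumes optimizations(label) returns a (label, strategy, arg) tuple, as the hunks above do):

from taskgraph.optimize.strategies import IndexSearch
from taskgraph.util.taskcluster import find_task_id_batched, status_task_batched

def batch_resolve_indexes(target_task_graph, optimizations, do_not_optimize):
    # Collect every index path an IndexSearch strategy may consult.
    indexes = set()
    for label in target_task_graph.graph.visit_postorder():
        if label in do_not_optimize:
            continue
        _, strategy, arg = optimizations(label)
        if isinstance(strategy, IndexSearch) and arg is not None:
            indexes.update(arg)

    # Resolve them with two batched Taskcluster calls instead of one
    # find_task_id/status_task round trip per index path.
    index_to_taskid = find_task_id_batched(list(indexes)) if indexes else {}
    taskid_to_status = (
        status_task_batched(list(index_to_taskid.values())) if index_to_taskid else {}
    )
    return index_to_taskid, taskid_to_status

The two lookup tables are then threaded through replace_tasks, which repacks them into the arg tuple that IndexSearch.should_replace_task now expects.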

@@ -3,7 +3,6 @@ from datetime import datetime
from taskgraph.optimize.base import OptimizationStrategy, register_strategy
from taskgraph.util.path import match as match_path
from taskgraph.util.taskcluster import find_task_id, status_task
logger = logging.getLogger(__name__)
@@ -22,12 +21,14 @@ class IndexSearch(OptimizationStrategy):
fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
def should_replace_task(self, task, params, deadline, index_paths):
def should_replace_task(self, task, params, deadline, arg):
"Look for a task with one of the given index paths"
index_paths, label_to_taskid, taskid_to_status = arg
for index_path in index_paths:
try:
task_id = find_task_id(index_path)
status = status_task(task_id)
task_id = label_to_taskid[index_path]
status = taskid_to_status[task_id]
# status can be `None` if we're in `testing` mode
# (e.g. test-action-callback)
if not status or status.get("state") in ("exception", "failed"):
@@ -40,7 +41,7 @@
return task_id
except KeyError:
# 404 will end up here and go on to the next index path
# go on to the next index path
pass
return False


@@ -193,6 +193,48 @@ def find_task_id(index_path, use_proxy=False):
return response.json()["taskId"]
def find_task_id_batched(index_paths, use_proxy=False):
"""Gets the task id of multiple tasks given their respective index.
Args:
index_paths (List[str]): A list of task indexes.
use_proxy (bool): Whether to use taskcluster-proxy (default: False)
Returns:
Dict[str, str]: A dictionary object mapping each valid index path
to its respective task id.
See the endpoint here:
https://docs.taskcluster.net/docs/reference/core/index/api#findTasksAtIndex
"""
endpoint = liburls.api(get_root_url(use_proxy), "index", "v1", "tasks/indexes")
task_ids = {}
continuation_token = None
while True:
response = _do_request(
endpoint,
json={
"indexes": index_paths,
},
params={"continuationToken": continuation_token},
)
response_data = response.json()
if not response_data["tasks"]:
break
response_tasks = response_data["tasks"]
if (len(task_ids) + len(response_tasks)) > len(index_paths):
# Sanity check
raise ValueError("more task ids were returned than were asked for")
task_ids.update((t["namespace"], t["taskId"]) for t in response_tasks)
continuation_token = response_data.get("continuationToken")
if continuation_token is None:
break
return task_ids
def get_artifact_from_index(index_path, artifact_path, use_proxy=False):
full_path = index_path + "/artifacts/" + artifact_path
response = _do_request(get_index_url(full_path, use_proxy))
@@ -271,6 +313,49 @@ def status_task(task_id, use_proxy=False):
return status
def status_task_batched(task_ids, use_proxy=False):
"""Gets the status of multiple tasks given task_ids.
In testing mode, just logs that it would have retrieved statuses.
Args:
task_ids (List[str]): A list of task ids.
use_proxy (bool): Whether to use taskcluster-proxy (default: False)
Returns:
dict: A dictionary object as defined here:
https://docs.taskcluster.net/docs/reference/platform/queue/api#statuses
"""
if testing:
logger.info(f"Would have gotten status for {len(task_ids)} tasks.")
return
endpoint = liburls.api(get_root_url(use_proxy), "queue", "v1", "tasks/status")
statuses = {}
continuation_token = None
while True:
response = _do_request(
endpoint,
json={
"taskIds": task_ids,
},
params={
"continuationToken": continuation_token,
},
)
response_data = response.json()
if not response_data["statuses"]:
break
response_tasks = response_data["statuses"]
if (len(statuses) + len(response_tasks)) > len(task_ids):
raise ValueError("more task statuses were returned than were asked for")
statuses.update((t["taskId"], t["status"]) for t in response_tasks)
continuation_token = response_data.get("continuationToken")
if continuation_token is None:
break
return statuses
def state_task(task_id, use_proxy=False):
"""Gets the state of a task given a task_id.