Skip to content

Commit f57ead1

Browse files
committedNov 22, 2019
Implement a decision task for taskcluster jobs
A decision task is the first task run, and ensures that we only schedule subsequent tasks which are relevant to the current push or pull request. This dynamic scheduling helps reduce load since we avoid spinning up workers for tasks that ultimately don't run, and unlocks new possibilities since we are able to schedule tasks that are dependent on other tasks. The tasks, their scheduling criteria and their dependencies are specified in a YAML format configuration file in tools/ci/tc/tasks/test.yml. This has a bespoke format, adopting some ideas from Azure and Gecko's taskcluster integration. The format is documented in `tools/ci/tc/README.md`. The data in that file undergoes transformations to produce a set of tasks, which are then filtered according to the event that caused the decision task to run. To initially prove out the implementation of dependent tasks we make the Firefox tasks depend on a download task. But this does not yet include the work to actually make the dependent tasks download Firefox from the parent task.
1 parent 86097c0 commit f57ead1

21 files changed

+1550
-512
lines changed
 

‎.taskcluster.yml

+55-352
Large diffs are not rendered by default.

‎tools/ci/commands.json

+23-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
"virtualenv": false
1515
},
1616
"tc-download": {
17-
"path": "tcdownload.py",
17+
"path": "tc/download.py",
1818
"script": "run",
1919
"parser": "get_parser",
2020
"parse_known": true,
@@ -24,5 +24,27 @@
2424
"requests",
2525
"pygithub"
2626
]
27+
},
28+
"tc-taskgraph": {
29+
"path": "tc/taskgraph.py",
30+
"script": "run",
31+
"help": "Build the taskgraph",
32+
"virtualenv": true,
33+
"install": [
34+
"requests",
35+
"pyyaml"
36+
]
37+
},
38+
"tc-decision": {
39+
"path": "tc/decision.py",
40+
"parser": "get_parser",
41+
"script": "run",
42+
"help": "Run the decision task",
43+
"virtualenv": true,
44+
"install": [
45+
"requests",
46+
"pyyaml",
47+
"taskcluster"
48+
]
2749
}
2850
}

‎tools/ci/run_tc.py

+22-58
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@
3838
import argparse
3939
import json
4040
import os
41-
import re
4241
import subprocess
4342
import sys
4443
import tempfile
@@ -99,8 +98,12 @@ def get_parser():
9998
help="Start xvfb")
10099
p.add_argument("--checkout",
101100
help="Revision to checkout before starting job")
102-
p.add_argument("job",
103-
help="Name of the job associated with the current event")
101+
p.add_argument("--install-certificates", action="store_true", default=None,
102+
help="Install web-platform.test certificates to UA store")
103+
p.add_argument("--no-install-certificates", action="store_false", default=None,
104+
help="Don't install web-platform.test certificates to UA store")
105+
p.add_argument("--rev",
106+
help="Revision that the task_head ref is expected to point to")
104107
p.add_argument("script",
105108
help="Script to run for the job")
106109
p.add_argument("script_args",
@@ -123,6 +126,12 @@ def checkout_revision(rev):
123126
subprocess.check_call(["git", "checkout", "--quiet", rev])
124127

125128

129+
def install_certificates():
    """Install the wpt CA certificate into the OS trust store and refresh it."""
    for command in (["cp", "tools/certs/cacert.pem",
                     "/usr/local/share/ca-certificates/cacert.crt"],
                    ["update-ca-certificates"]):
        subprocess.check_call(["sudo"] + command)
133+
134+
126135
def install_chrome(channel):
127136
if channel in ("experimental", "dev", "nightly"):
128137
deb_archive = "google-chrome-unstable_current_amd64.deb"
@@ -213,29 +222,6 @@ def start_xvfb():
213222
start(["sudo", "fluxbox", "-display", os.environ["DISPLAY"]])
214223

215224

216-
def get_extra_jobs(event):
217-
body = None
218-
jobs = set()
219-
if "commits" in event and event["commits"]:
220-
body = event["commits"][0]["message"]
221-
elif "pull_request" in event:
222-
body = event["pull_request"]["body"]
223-
224-
if not body:
225-
return jobs
226-
227-
regexp = re.compile(r"\s*tc-jobs:(.*)$")
228-
229-
for line in body.splitlines():
230-
m = regexp.match(line)
231-
if m:
232-
items = m.group(1)
233-
for item in items.split(","):
234-
jobs.add(item.strip())
235-
break
236-
return jobs
237-
238-
239225
def set_variables(event):
240226
# Set some variables that we use to get the commits on the current branch
241227
ref_prefix = "refs/heads/"
@@ -256,23 +242,13 @@ def set_variables(event):
256242
os.environ["GITHUB_BRANCH"] = branch
257243

258244

259-
def include_job(job):
260-
# Special case things that unconditionally run on pushes,
261-
# assuming a higher layer is filtering the required list of branches
262-
if (os.environ["GITHUB_PULL_REQUEST"] == "false" and
263-
job == "run-all"):
264-
return True
265-
266-
jobs_str = run([os.path.join(root, "wpt"),
267-
"test-jobs"], return_stdout=True)
268-
print(jobs_str)
269-
return job in set(jobs_str.splitlines())
270-
271-
272245
def setup_environment(args):
273246
if args.hosts_file:
274247
make_hosts_file()
275248

249+
if args.install_certificates:
250+
install_certificates()
251+
276252
if "chrome" in args.browser:
277253
assert args.channel is not None
278254
install_chrome(args.channel)
@@ -340,6 +316,13 @@ def fetch_event_data():
340316
def main():
341317
args = get_parser().parse_args()
342318

319+
if args.rev is not None:
320+
task_head = subprocess.check_output(["git", "rev-parse", "task_head"]).strip()
321+
if task_head != args.rev:
322+
print("CRITICAL: task_head points at %s, expected %s. "
323+
"This may be because the branch was updated" % (task_head, args.rev))
324+
sys.exit(1)
325+
343326
if "TASK_EVENT" in os.environ:
344327
event = json.loads(os.environ["TASK_EVENT"])
345328
else:
@@ -350,25 +333,6 @@ def main():
350333

351334
setup_repository()
352335

353-
extra_jobs = get_extra_jobs(event)
354-
355-
job = args.job
356-
357-
print("Job %s" % job)
358-
359-
run_if = [(lambda: job == "all", "job set to 'all'"),
360-
(lambda:"all" in extra_jobs, "Manually specified jobs includes 'all'"),
361-
(lambda:job in extra_jobs, "Manually specified jobs includes '%s'" % job),
362-
(lambda:include_job(job), "CI required jobs includes '%s'" % job)]
363-
364-
for fn, msg in run_if:
365-
if fn():
366-
print(msg)
367-
break
368-
else:
369-
print("Job not scheduled for this push")
370-
return
371-
372336
# Run the job
373337
setup_environment(args)
374338
os.chdir(root)

‎tools/ci/tc/README.md

+235
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,235 @@
1+
# Taskgraph Setup
2+
3+
The taskgraph is built from a YAML file. This file has two top-level
4+
properties: `components` and `tasks`. The full list of tasks is
5+
defined by the `tasks` object; each task is an object with a single
6+
property representing the task with the corresponding value an object
7+
representing the task properties. Each task requires the following
8+
top-level properties:
9+
10+
* `provisionerId`: String. Name of Taskcluster provisioner
11+
* `schedulerId`: String. Name of Taskcluster scheduler
12+
* `deadline`: String. Time until the task expires
13+
* `image`: String. Name of docker image to use for task
14+
* `maxRunTime`: Number. Maximum time in seconds for which the task can
15+
run.
16+
* `artifacts`: Object. List of artifacts and directories to upload; see
17+
Taskcluster documentation.
18+
* `command`: String. Command to run. This is automatically wrapped in a
19+
run_tc command
20+
* `options`: Optional Object. Options to pass into run_tc
21+
- xvfb: Boolean. Enable Xvfb for run
22+
- oom-killer: Boolean. Enable xvfb for run
23+
- hosts: Boolean. Update hosts file with wpt hosts before run
24+
- install-certificates: Boolean. Install wpt certs into OS
25+
certificate store for run
26+
- browser: List. List of browser names for run
27+
- channel: String. Browser channel for run
28+
* `trigger`: Object. Conditions on which to consider task. One or more
29+
of following properties:
30+
- branch: List. List of branch names on which to trigger.
31+
- pull-request: No value. Trigger for pull request actions
32+
* `schedule-if`: Optional Object. Conditions on which task should be
33+
scheduled given it meets the trigger conditions.
34+
- `run-job`: List. Job names for which this task should be considered,
35+
matching the output from `./wpt test-jobs`
36+
* `env`: Optional Object. Environment variables to set when running task.
37+
* `depends-on`: Optional list. List of task names that must be complete
38+
before the current task is scheduled.
39+
* `description`: String. Task description.
40+
* `name`: Optional String. Name to use for the task overriding the
41+
property name. This is useful in combination with substitutions
42+
described below.
43+
44+
## Task Expansions
45+
46+
Using the above syntax it's possible to describe each task
47+
directly. But typically in a taskgraph there are many common
48+
properties between tasks so it's tedious and error prone to repeat
49+
information that's common to multiple tasks. Therefore the taskgraph
50+
format provides several mechanisms to reuse partial task definitions
51+
across multiple tasks.
52+
53+
### Components
54+
55+
The other top-level property in the taskgraph format is
56+
`components`. The value of this property is an object containing named
57+
partial task definitions. Each task definition may contain a property called
58+
`use` which is a list of components to use as the basis for the task
59+
definition. The components list is evaluated in order. If a property
60+
is not previously defined in the output it is added to the output. If
61+
it was previously defined, the value is updated according to the type:
62+
* Strings and numbers are replaced with a new value
63+
* Lists are extended with the additional values
64+
* Objects are updated recursively following the above rules
65+
This means that types must always match between components and the
66+
final value.
67+
68+
For example
69+
```
70+
components:
71+
example-1:
72+
list_prop:
73+
- first
74+
- second
75+
object_prop:
76+
key1: value1
77+
key2: base_value
78+
example-2:
79+
list_prop:
80+
- third
81+
- fourth
82+
object_prop:
83+
key3:
84+
- value3-1
85+
86+
tasks:
87+
- example-task:
88+
use:
89+
- example-1
90+
- example-2
91+
object_prop:
92+
key2: value2
93+
key3:
94+
- value3-2
95+
```
96+
97+
will evaluate to the following task:
98+
99+
```
100+
example-task:
101+
list_prop:
102+
- first
103+
- second
104+
- third
105+
- fourth
106+
object_prop:
107+
key1: value1
108+
key2: value2
109+
key3:
110+
- value3-1
111+
- value3-2
112+
```
113+
114+
Note that components cannot currently define `use` properties of their own.
115+
116+
## Substitutions
117+
118+
Components and tasks can define a property `vars` that holds variables
119+
which are later substituted into the task definition using the syntax
120+
`${vars.property-name}`. For example:
121+
122+
```
123+
components:
124+
generic-component:
125+
prop: ${vars.value}
126+
127+
tasks:
128+
- first:
129+
use:
130+
- generic-component
131+
vars:
132+
value: value1
133+
- second:
134+
use:
135+
- generic-component
136+
vars:
137+
value: value2
138+
```
139+
140+
Results in the following tasks:
141+
142+
```
143+
first:
144+
prop: value1
145+
second:
146+
prop: value2
147+
```
148+
149+
## Maps
150+
151+
Instead of defining a task directly, an item in the tasks property may
152+
be an object with a single property `$map`. This object itself has two
153+
child properties; `for` and `do`. The value of `for` is a list of
154+
objects, and the value of `do` is either an object or a list of
155+
objects. For each object in the `for` property, a set of tasks is
156+
created by taking a copy of that object for each task in the `do`
157+
property, updating the object with the properties from the
158+
corresponding `do` object, using the same rules as for components
159+
above, and then processing as for a normal task. `$map` rules can also
160+
be nested.
161+
162+
Note: Although `$map` shares a name with the `$map` used in json-e
163+
(used in `.taskcluster.yml`), the semantics are different.
164+
165+
For example
166+
167+
```
168+
components: {}
169+
tasks:
170+
$map:
171+
for:
172+
- vars:
173+
example: value1
174+
- vars:
175+
example: value2
176+
do:
177+
example-${vars.example}
178+
prop: ${vars.example}
179+
```
180+
181+
Results in the tasks
182+
183+
```
184+
example-value1:
185+
prop: value1
186+
example-value2:
187+
prop: value2
188+
```
189+
190+
Note that in combination with `$map`, variable substitutions are
191+
applied *twice*; once after the `$map` is evaluated and once after the
192+
`use` statements are evaluated.
193+
194+
## Chunks
195+
196+
A common requirement for tasks is that they are "chunked" into N
197+
partial tasks. This is handled specially in the syntax. A top level
198+
property `chunks` can be used to define the number of individual
199+
chunks to create for a specific task. Each chunked task is created
200+
with a `chunks` property set to an object containing an `id` property
201+
containing the one-based index of the chunk and a `total` property
202+
containing the total number of chunks. These can be substituted into
203+
the task definition using the same syntax as for `vars` above
204+
e.g. `${chunks.id}`. Note that because task names must be unique, it's
205+
common to specify a `name` property on the task that will override the
206+
property name e.g.
207+
208+
```
209+
components: {}
210+
tasks:
211+
- chunked-task:
212+
chunks: 2
213+
command: "task-run --chunk=${chunks.id} --totalChunks=${chunks.total}"
214+
name: task-chunk-${chunks.id}
215+
```
216+
217+
creates tasks:
218+
219+
```
220+
task-chunk-1:
221+
command: "task-run --chunk=1 --totalChunks=2"
222+
task-chunk-2:
223+
command: "task-run --chunk=2 --totalChunks=2"
224+
```
225+
226+
# Overall processing model
227+
228+
The overall processing model for tasks is as follows:
229+
* Evaluate maps
230+
* Perform substitutions
231+
* Evaluate use statements
232+
* Expand chunks
233+
* Perform substitutions
234+
235+
At each point after maps are evaluated tasks must have a unique name.
File renamed without changes.

‎tools/ci/tc/decision.py

+314
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,314 @@
1+
import argparse
2+
import json
3+
import logging
4+
import os
5+
import re
6+
import subprocess
7+
from collections import OrderedDict
8+
9+
import taskcluster
10+
from six import iteritems, itervalues
11+
12+
from . import taskgraph
13+
14+
15+
here = os.path.abspath(os.path.dirname(__file__))
16+
17+
18+
logging.basicConfig()
19+
logger = logging.getLogger()
20+
21+
22+
def get_triggers(event):
    """Classify the GitHub event.

    Returns (is_pr, branch) where branch is the short branch name for a
    push event (refs/heads/ prefix stripped) or None otherwise.
    """
    is_pr = "pull_request" in event
    branch = None
    if not is_pr and "ref" in event:
        prefix = "refs/heads/"
        branch = event["ref"]
        if branch.startswith(prefix):
            branch = branch[len(prefix):]
    return is_pr, branch
33+
34+
35+
def fetch_event_data(queue):
    """Fetch the GitHub event stored on the decision task's own definition.

    Returns the serialized event, or None when TASK_ID is unset
    (e.g. under local testing).
    """
    task_id = os.environ.get("TASK_ID")
    if task_id is None:
        logger.warning("Missing TASK_ID environment variable")
        # For example under local testing
        return None
    return queue.task(task_id).get("extra", {}).get("github_event")
46+
47+
48+
def filter_triggers(event, all_tasks):
    """Select the tasks whose trigger conditions match the GitHub event."""
    is_pr, branch = get_triggers(event)
    triggered = {}
    for name, task in all_tasks.items():
        trigger = task.get("trigger")
        if trigger is None:
            continue
        if is_pr and "pull-request" in trigger:
            triggered[name] = task
        elif branch is not None and "branch" in trigger:
            for candidate in trigger["branch"]:
                # A trailing "*" acts as a prefix wildcard
                matches = (candidate == branch or
                           candidate.endswith("*") and
                           branch.startswith(candidate[:-1]))
                if matches:
                    triggered[name] = task
    logger.info("Triggers match tasks:\n * %s" % "\n * ".join(triggered.keys()))
    return triggered
62+
63+
64+
def get_run_jobs(event):
    """Compute the job names affected by the changes in the event.

    Combines path-based job detection over the commit range with any
    jobs explicitly requested via a tc-jobs: line.
    """
    from tools.ci import jobs
    if "pull_request" in event:
        base = event["pull_request"]["base"]["sha"]
    else:
        base = event["before"]
    revish = "%s..%s" % (base, event["after"])
    logger.info("Looking for changes in range %s" % revish)
    paths = jobs.get_paths(revish=revish)
    logger.info("Found changes in paths:%s" % "\n".join(paths))
    all_jobs = jobs.get_jobs(paths) | get_extra_jobs(event)
    logger.info("Including jobs:\n * %s" % "\n * ".join(all_jobs))
    return all_jobs
77+
78+
79+
def get_extra_jobs(event):
    """Parse a "tc-jobs:" line out of the commit message or PR body.

    Returns the (possibly empty) set of comma-separated job names from
    the first matching line.
    """
    extra = set()
    if event.get("commits"):
        text = event["commits"][0]["message"]
    elif "pull_request" in event:
        text = event["pull_request"]["body"]
    else:
        text = None
    if not text:
        return extra

    jobs_re = re.compile(r"\s*tc-jobs:(.*)$")
    for line in text.splitlines():
        match = jobs_re.match(line)
        if match is not None:
            extra.update(part.strip() for part in match.group(1).split(","))
            break
    return extra
100+
101+
102+
def filter_schedule_if(event, tasks):
    """Apply schedule-if rules to already-triggered tasks.

    Tasks without a schedule-if property are always kept; tasks with a
    run-job rule are kept when any listed job (or "all") is in the set of
    jobs affected by the event.
    """
    scheduled = {}
    run_jobs = None
    for name, task in tasks.items():
        if "schedule-if" not in task:
            scheduled[name] = task
            continue
        if "run-job" not in task["schedule-if"]:
            # A schedule-if with no recognized rule never schedules
            continue
        if run_jobs is None:
            # Computed lazily: only needed when some task has a run-job rule
            run_jobs = get_run_jobs(event)
        wanted = task["schedule-if"]["run-job"]
        if "all" in run_jobs or any(item in run_jobs for item in wanted):
            scheduled[name] = task
    logger.info("Scheduling rules match tasks:\n * %s" % "\n * ".join(scheduled.keys()))
    return scheduled
116+
117+
118+
def get_fetch_rev(event):
    """Return the (ref, sha) to fetch for this event.

    The sha may be None for a PR whose merge commit cannot be resolved.
    """
    is_pr, _ = get_triggers(event)
    if not is_pr:
        return event["ref"], event["after"]

    # Try to get the actual rev so that all non-decision tasks are pinned to that
    ref = "refs/pull/%s/merge" % event["pull_request"]["number"]
    try:
        output = subprocess.check_output(["git", "ls-remote", "origin", ref])
    except subprocess.CalledProcessError:
        import traceback
        logger.error(traceback.format_exc())
        logger.error("Failed to get merge commit sha1")
        return ref, None
    if not output:
        logger.error("Failed to get merge commit")
        return ref, None
    return ref, output.split()[0]
136+
137+
138+
def build_full_command(event, task):
    """Construct the shell command to run in the worker for *task*.

    Clones the repo at the event's ref, installs any requested apt
    packages, then invokes run_tc.py with the task's options followed by
    the task's own command.
    """
    fetch_ref, fetch_sha = get_fetch_rev(event)

    options = task.get("options", {})
    flags = []
    if fetch_sha is not None:
        flags.append("--rev=%s" % fetch_sha)
    if options.get("oom-killer"):
        flags.append("--oom-killer")
    if options.get("xvfb"):
        flags.append("--xvfb")
    flags.append("--hosts" if options.get("hosts") else "--no-hosts")
    if options.get("checkout"):
        flags.append("--checkout=%s" % options["checkout"])
    for browser in options.get("browser", []):
        flags.append("--browser=%s" % browser)
    if options.get("channel"):
        flags.append("--channel=%s" % options["channel"])
    if options.get("install-certificates"):
        flags.append("--install-certificates")

    install_str = ""
    install_packages = task.get("install")
    if install_packages:
        commands = ["apt update -qqy"]
        commands.extend("apt install -qqy %s" % item
                        for item in install_packages)
        install_str = "\n".join("sudo %s;" % item for item in commands)

    cmd_args = {
        "task_name": task["name"],
        "repo_url": event["repository"]["clone_url"],
        "fetch_ref": fetch_ref,
        "task_cmd": task["command"],
        "install_str": install_str,
        "options_str": " ".join(str(item) for item in flags),
    }

    return ["/bin/bash",
            "--login",
            "-c",
            """
~/start.sh \
%(repo_url)s \
%(fetch_ref)s;
%(install_str)s
cd web-platform-tests;
./tools/ci/run_tc.py %(options_str)s -- %(task_cmd)s;
""" % cmd_args]
189+
190+
191+
def get_owner(event):
    """Return the pusher's email address, or a noreply fallback."""
    email = event.get("pusher", {}).get("email", "")
    if "@" not in email:
        return "web-platform-tests@users.noreply.github.com"
    return email
196+
197+
198+
def create_tc_task(event, task, taskgroup_id, depends_on_ids):
    """Build (but do not submit) a Taskcluster task definition.

    Returns (task_id, task_data) for the new task; dependency ids make
    the task wait for all of them to complete.
    """
    task_id = taskcluster.slugId()
    task_data = {
        "taskGroupId": taskgroup_id,
        "created": taskcluster.fromNowJSON(""),
        "deadline": taskcluster.fromNowJSON(task["deadline"]),
        "provisionerId": task["provisionerId"],
        "schedulerId": task["schedulerId"],
        "workerType": task["workerType"],
        "metadata": {
            "name": task["name"],
            "description": task.get("description", ""),
            "owner": get_owner(event),
            "source": event["repository"]["clone_url"]
        },
        "payload": {
            "artifacts": task.get("artifacts"),
            "command": build_full_command(event, task),
            "image": task.get("image"),
            "maxRunTime": task.get("maxRunTime"),
            "env": task.get("env", {}),
        },
        "extra": {
            # Preserved so downstream tasks can recover the original event
            "github_event": json.dumps(event)
        }
    }
    if depends_on_ids:
        task_data["dependencies"] = depends_on_ids
        task_data["requires"] = "all-completed"
    return task_id, task_data
229+
230+
231+
def build_task_graph(event, all_tasks, tasks):
    """Create definitions for *tasks*, pulling in their dependencies.

    Returns an OrderedDict mapping task name to (task_id, task_data),
    with every dependency created before its dependent.
    """
    task_id_map = OrderedDict()
    taskgroup_id = os.environ.get("TASK_ID", taskcluster.slugId())

    def add_task(task_name, task):
        dep_ids = []
        for depends_name in task.get("depends-on", []):
            if depends_name not in task_id_map:
                # Recursively create dependencies first so their ids exist
                add_task(depends_name, all_tasks[depends_name])
            dep_ids.append(task_id_map[depends_name][0])
        task_id_map[task_name] = create_tc_task(event, task, taskgroup_id, dep_ids)

    for task_name, task in tasks.items():
        add_task(task_name, task)

    return task_id_map
250+
251+
252+
def create_tasks(queue, task_id_map):
    """Submit every task in *task_id_map* to the Taskcluster queue."""
    for task_id, task_data in task_id_map.values():
        queue.createTask(task_id, task_data)
255+
256+
257+
def get_event(queue, event_path):
    """Load the GitHub event JSON from a file, the environment, or the task.

    Precedence: explicit --event-path, then TASK_EVENT, then the event
    stored on the decision task itself. Raises ValueError if none is
    available or the payload is not valid JSON.
    """
    if event_path is not None:
        try:
            with open(event_path) as f:
                raw_event = f.read()
        except IOError:
            logger.error("Missing event file at path %s" % event_path)
            raise
    elif "TASK_EVENT" in os.environ:
        raw_event = os.environ["TASK_EVENT"]
    else:
        raw_event = fetch_event_data(queue)

    if not raw_event:
        raise ValueError("Can't find GitHub event definition; for local testing pass --event-path")

    try:
        return json.loads(raw_event)
    except ValueError:
        logger.error("Event was not valid JSON")
        raise
276+
277+
278+
def decide(event):
    """Compute the task graph that should be scheduled for *event*."""
    tasks_file = os.path.join(here, "tasks", "test.yml")
    all_tasks = taskgraph.load_tasks_from_path(tasks_file)

    triggered = filter_triggers(event, all_tasks)
    scheduled = filter_schedule_if(event, triggered)

    return build_task_graph(event, all_tasks, scheduled)
286+
287+
288+
def get_parser():
    """Build the command-line parser for the decision task."""
    parser = argparse.ArgumentParser()
    argument_spec = [
        (["--event-path"],
         {"help": "Path to file containing serialized GitHub event"}),
        (["--dry-run"],
         {"action": "store_true",
          "help": "Don't actually create the tasks, just output the tasks that "
                  "would be created"}),
        (["--tasks-path"],
         {"help": "Path to file in which to write payload for all scheduled tasks"}),
    ]
    for flags, kwargs in argument_spec:
        parser.add_argument(*flags, **kwargs)
    return parser
298+
299+
300+
def run(venv, **kwargs):
    """wpt tc-decision entry point: load the event and create the task graph.

    With --dry-run the tasks are printed instead of created; --tasks-path
    additionally writes the computed graph to a file either way.
    """
    queue = taskcluster.Queue({'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']})
    event = get_event(queue, event_path=kwargs["event_path"])

    task_id_map = decide(event)

    try:
        if kwargs["dry_run"]:
            print(json.dumps(task_id_map, indent=2))
        else:
            create_tasks(queue, task_id_map)
    finally:
        if kwargs["tasks_path"]:
            with open(kwargs["tasks_path"], "w") as f:
                json.dump(task_id_map, f, indent=2)

‎tools/ci/tcdownload.py ‎tools/ci/tc/download.py

+1
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
# be https://community-tc.services.mozilla.com)
1616
TASKCLUSTER_ROOT_URL = 'https://taskcluster.net'
1717

18+
1819
def get_parser():
1920
parser = argparse.ArgumentParser()
2021
parser.add_argument("--ref", action="store", default="master",

‎tools/ci/tc/taskgraph.py

+170
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,170 @@
1+
import json
2+
import os
3+
import re
4+
from copy import deepcopy
5+
6+
import six
7+
import yaml
8+
from six import iteritems
9+
10+
here = os.path.dirname(__file__)
11+
12+
13+
def first(iterable):
    """Return the first item from a list or iterator.

    Raises ValueError if the object is not iterable, and StopIteration
    if it is empty (same contract as the original).
    """
    # The original probed for a Python 2 style `.next` attribute; under
    # Python 3 iter() covers both iterators and iterables directly.
    try:
        iterator = iter(iterable)
    except TypeError:
        raise ValueError("Object isn't iterable")
    return next(iterator)
21+
22+
23+
def load_task_file(path):
    """Parse the YAML task-definition file at *path*."""
    with open(path) as fh:
        contents = yaml.safe_load(fh)
    return contents
26+
27+
28+
def update_recursive(data, update_data):
    """Merge *update_data* into *data* in place.

    Scalars replace the existing value, lists are extended, and dicts
    are merged recursively; a type mismatch raises ValueError.
    """
    for key, new_value in update_data.items():
        if key not in data:
            data[key] = new_value
            continue
        current = data[key]
        if isinstance(new_value, dict):
            if not isinstance(current, dict):
                raise ValueError("Variable %s has inconsistent types "
                                 "(expected object)" % key)
            update_recursive(current, new_value)
        elif isinstance(new_value, list):
            if not isinstance(current, list):
                raise ValueError("Variable %s has inconsistent types "
                                 "(expected list)" % key)
            current.extend(new_value)
        else:
            data[key] = new_value
46+
47+
48+
def resolve_use(task_data, templates):
    """Expand a task's `use` list.

    Named component templates are merged in order, then the task's own
    properties are merged on top; the `use` key is dropped from the result.
    """
    resolved = {}
    for template_name in task_data.get("use", []):
        update_recursive(resolved, deepcopy(templates[template_name]))
    update_recursive(resolved, task_data)
    resolved.pop("use", None)
    return resolved
56+
57+
58+
def resolve_name(task_data, default_name):
    """Ensure the task has a name, defaulting to its property name."""
    task_data.setdefault("name", default_name)
    return task_data
62+
63+
64+
def resolve_chunks(task_data):
    """Expand a task with a numeric `chunks` count into one copy per chunk.

    Each copy's `chunks` property becomes {"id": <1-based index>,
    "total": <count>}; tasks without `chunks` are returned unchanged.
    """
    if "chunks" not in task_data:
        return [task_data]
    total = task_data["chunks"]
    expanded = []
    for index in range(1, total + 1):
        chunk = deepcopy(task_data)
        chunk["chunks"] = {"id": index, "total": total}
        expanded.append(chunk)
    return expanded
75+
76+
77+
def replace_vars(input_string, variables):
    """Substitute ${dotted.path} references in *input_string*.

    References that cannot be resolved in *variables*, and ones escaped
    with a preceding backslash, are left untouched.
    # TODO: support replacing as a non-string type?
    """
    variable_re = re.compile(r"(?<!\\)\${([^}]+)}")

    def lookup(match):
        value = variables
        for key in match.group(1).split("."):
            try:
                value = value[key]
            except Exception:
                # Don't substitute
                return match.group(0)
        return str(value)

    return variable_re.sub(lookup, input_string)
93+
94+
95+
def sub_variables(data, variables):
    """Recursively apply variable substitution to every string in *data*."""
    if isinstance(data, six.string_types):
        return replace_vars(data, variables)
    if isinstance(data, dict):
        return {key: sub_variables(value, variables)
                for key, value in iteritems(data)}
    if isinstance(data, list):
        return [sub_variables(item, variables) for item in data]
    # Numbers, booleans, None etc. pass through unchanged
    return data
104+
105+
106+
def substitute_variables(task):
    """Substitute a task's own `vars` and `chunks` values into itself."""
    context = {"vars": task.get("vars", {}),
               "chunks": task.get("chunks", {})}
    return sub_variables(task, context)
111+
112+
113+
def expand_maps(task):
    """Expand a `$map` task entry into a list of concrete task objects.

    A non-$map task is returned unchanged as a single-element list. For
    a $map, each `for` object is combined with each `do` object (merged
    with component rules); nested $maps in `do` are expanded first.
    """
    key = first(task.keys())
    if key != "$map":
        return [task]

    map_data = task["$map"]
    if set(map_data.keys()) != set(["for", "do"]):
        raise ValueError("$map objects must have exactly two properties named 'for' "
                         "and 'do' (got %s)" % ("no properties" if not map_data.keys()
                                                else ", ".join(map_data.keys())))
    expanded = []
    for for_data in map_data["for"]:
        do_items = map_data["do"]
        if not isinstance(do_items, list):
            # Single (possibly nested $map) object; normalize to a list
            do_items = expand_maps(do_items)
        for do_data in do_items:
            if len(do_data.keys()) != 1:
                raise ValueError("Each item in the 'do' list must be an object "
                                 "with a single property")
            do_name = first(do_data.keys())
            merged = deepcopy(for_data)
            update_recursive(merged, deepcopy(do_data[do_name]))
            expanded.append({do_name: merged})
    return expanded
137+
138+
139+
def load_tasks(tasks_data):
    """Convert parsed task-file data into a {name: task} mapping.

    Processing order: expand $map entries, substitute variables, resolve
    `use` components, resolve names, expand chunks, substitute again.
    Duplicate task names raise ValueError.
    """
    map_resolved_tasks = {}

    for task_entry in tasks_data["tasks"]:
        if len(task_entry.keys()) != 1:
            raise ValueError("Each task must be an object with a single property")
        for expanded in expand_maps(task_entry):
            if len(expanded.keys()) != 1:
                raise ValueError("Each task must be an object with a single property")
            name = first(expanded.keys())
            data = expanded[name]
            # Names may themselves contain ${vars.*} references
            resolved_name = sub_variables(name, {"vars": data.get("vars", {})})
            if resolved_name in map_resolved_tasks:
                raise ValueError("Got duplicate task name %s" % resolved_name)
            map_resolved_tasks[resolved_name] = substitute_variables(data)

    tasks = []
    for task_default_name, data in iteritems(map_resolved_tasks):
        task = resolve_use(data, tasks_data["components"])
        task = resolve_name(task, task_default_name)
        tasks.extend(resolve_chunks(task))

    # Second substitution pass picks up chunk ids and component vars
    tasks = [substitute_variables(task_data) for task_data in tasks]
    return {task["name"]: task for task in tasks}
163+
164+
165+
def load_tasks_from_path(path):
    """Load and fully process the task file at *path*."""
    raw_data = load_task_file(path)
    return load_tasks(raw_data)
167+
168+
169+
def run(venv, **kwargs):
    """wpt tc-taskgraph entry point: dump the expanded task graph as JSON."""
    tasks = load_tasks_from_path(os.path.join(here, "tasks", "test.yml"))
    print(json.dumps(tasks, indent=2))

‎tools/ci/tc/tasks/test.yml

+354
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,354 @@
1+
components:
2+
wpt-base:
3+
provisionerId: proj-wpt
4+
workerType: ci
5+
schedulerId: taskcluster-github
6+
deadline: "24 hours"
7+
image: harjgam/web-platform-tests:0.33
8+
maxRunTime: 7200
9+
artifacts:
10+
public/results:
11+
path: /home/test/artifacts
12+
type: directory
13+
14+
wpt-testharness:
15+
chunks: 16
16+
vars:
17+
test-type: testharness
18+
19+
wpt-reftest:
20+
chunks: 5
21+
vars:
22+
test-type: reftest
23+
24+
wpt-wdspec:
25+
chunks: 1
26+
vars:
27+
test-type: wdspec
28+
29+
run-options:
30+
options:
31+
xvfb: true
32+
oom-killer: true
33+
hosts: true
34+
install-certificates: true
35+
36+
wpt-run:
37+
name: wpt-${vars.browser}-${vars.channel}-${vars.suite}-chunk-${chunks.id}
38+
options:
39+
browser:
40+
- ${vars.browser}
41+
channel: ${vars.channel}
42+
command: >-
43+
./tools/ci/taskcluster-run.py
44+
${vars.browser}
45+
--
46+
--channel=${vars.channel}
47+
--log-wptreport=../artifacts/wpt_report.json
48+
--log-wptscreenshot=../artifacts/wpt_screenshot.txt
49+
--no-fail-on-unexpected
50+
--this-chunk=${chunks.id}
51+
--total-chunks=${chunks.total}
52+
53+
trigger-master:
54+
trigger:
55+
branch:
56+
- master
57+
58+
trigger-push:
59+
trigger:
60+
branch:
61+
- triggers/${vars.browser}_${vars.channel}
62+
63+
trigger-daily:
64+
trigger:
65+
branch:
66+
- epochs/daily
67+
68+
trigger-weekly:
69+
trigger:
70+
branch:
71+
- epochs/weekly
72+
73+
trigger-pr:
74+
trigger:
75+
pull-request:
76+
77+
browser-firefox:
78+
depends-on:
79+
- download-firefox-${vars.channel}
80+
81+
browser-webkitgtk_minibrowser: {}
82+
83+
browser-chrome: {}
84+
85+
tox-python2:
86+
env:
87+
TOXENV: py27
88+
PY_COLORS: 0
89+
90+
tox-python3:
91+
env:
92+
TOXENV: py36
93+
PY_COLORS: 0
94+
install:
95+
- python3-pip
96+
97+
tasks:
98+
# Run full suites on push
99+
- $map:
100+
for:
101+
- vars:
102+
suite: testharness
103+
- vars:
104+
suite: reftest
105+
- vars:
106+
suite: wdspec
107+
do:
108+
$map:
109+
for:
110+
- vars:
111+
browser: firefox
112+
channel: nightly
113+
use:
114+
- trigger-master
115+
- trigger-push
116+
- vars:
117+
browser: firefox
118+
channel: beta
119+
use:
120+
- trigger-weekly
121+
- trigger-push
122+
- vars:
123+
browser: firefox
124+
channel: stable
125+
use:
126+
- trigger-daily
127+
- trigger-push
128+
- vars:
129+
browser: chrome
130+
channel: dev
131+
use:
132+
- trigger-master
133+
- trigger-push
134+
- vars:
135+
browser: chrome
136+
channel: beta
137+
use:
138+
- trigger-weekly
139+
- trigger-push
140+
- vars:
141+
browser: chrome
142+
channel: stable
143+
use:
144+
- trigger-daily
145+
- trigger-push
146+
- vars:
147+
browser: webkitgtk_minibrowser
148+
channel: nightly
149+
use:
150+
- trigger-daily
151+
- trigger-push
152+
- vars:
153+
browser: webkitgtk_minibrowser
154+
channel: stable
155+
use:
156+
- trigger-weekly
157+
- trigger-push
158+
159+
do:
160+
- ${vars.browser}-${vars.channel}-${vars.suite}:
161+
use:
162+
- wpt-base
163+
- run-options
164+
- wpt-run
165+
- browser-${vars.browser}
166+
- wpt-${vars.suite}
167+
description: >-
168+
A subset of WPT's "${vars.suite}" tests (chunk number ${chunks.id}
169+
of ${chunks.total}), run in the ${vars.channel} release of
170+
${vars.browser}.
171+
172+
- $map:
173+
for:
174+
- vars:
175+
browser: firefox
176+
channel: nightly
177+
- vars:
178+
browser: chrome
179+
channel: dev
180+
do:
181+
- wpt-${vars.browser}-${vars.channel}-stability:
182+
use:
183+
- wpt-base
184+
- browser-${vars.browser}
185+
description: >-
186+
Verify that all tests affected by a pull request are stable
187+
when executed in ${vars.browser}.
188+
command: >-
189+
./tools/ci/taskcluster-run.py
190+
--commit-range base_head
191+
${vars.browser}
192+
--
193+
--channel=${vars.channel}
194+
--verify
195+
196+
- wpt-${vars.browser}-${vars.channel}-results:
197+
use:
198+
- wpt-base
199+
- run-options
200+
- browser-${vars.browser}
201+
description: >-
202+
Collect results for all tests affected by a pull request in
203+
${vars.browser}.
204+
command: >-
205+
./tools/ci/taskcluster-run.py
206+
--commit-range base_head
207+
${vars.browser}
208+
--
209+
--channel=${vars.channel}
210+
--no-fail-on-unexpected
211+
--log-wptreport=../artifacts/wpt_report.json
212+
--log-wptscreenshot=../artifacts/wpt_screenshot.txt
213+
214+
- wpt-${vars.browser}-${vars.channel}-results-without-changes:
215+
use:
216+
- wpt-base
217+
- run-options
218+
- browser-${vars.browser}
219+
options:
220+
checkout: base_head
221+
description: >-
222+
Collect results for all tests affected by a pull request in
223+
${vars.browser} but without the changes in the PR.
224+
command: >-
225+
./tools/ci/taskcluster-run.py
226+
--commit-range task_head
227+
${vars.browser}
228+
--
229+
--channel=${vars.channel}
230+
--no-fail-on-unexpected
231+
--log-wptreport=../artifacts/wpt_report.json
232+
--log-wptscreenshot=../artifacts/wpt_screenshot.txt
233+
- $map:
234+
for:
235+
- vars:
236+
channel: nightly
237+
- vars:
238+
channel: beta
239+
- vars:
240+
channel: stable
241+
do:
242+
download-firefox-${vars.channel}:
243+
use:
244+
- wpt-base
245+
command: "./wpt install --download-only --destination /home/test/artifacts/ --channel=${vars.channel} firefox browser"
246+
247+
- lint:
248+
use:
249+
- wpt-base
250+
- trigger-master
251+
- trigger-pr
252+
description: >-
253+
Lint for wpt-specific requirements
254+
command: "./wpt lint --all"
255+
256+
- update-built:
257+
use:
258+
- wpt-base
259+
- trigger-pr
260+
schedule-if:
261+
run-job:
262+
- update_built
263+
command: "./tools/ci/ci_built_diff.sh"
264+
265+
- tools/ unittests (Python 2):
266+
use:
267+
- wpt-base
268+
- trigger-pr
269+
- tox-python2
270+
description: >-
271+
Unit tests for tools running under Python 2.7, excluding wptrunner
272+
command: ./tools/ci/ci_tools_unittest.sh
273+
env:
274+
HYPOTHESIS_PROFILE: ci
275+
schedule-if:
276+
run-job:
277+
- tools_unittest
278+
279+
- tools/ unittests (Python 3):
280+
description: >-
281+
Unit tests for tools running under Python 3, excluding wptrunner
282+
use:
283+
- wpt-base
284+
- trigger-pr
285+
- tox-python3
286+
command: ./tools/ci/ci_tools_unittest.sh
287+
env:
288+
HYPOTHESIS_PROFILE: ci
289+
schedule-if:
290+
run-job:
291+
- tools_unittest
292+
293+
- tools/wpt/ tests:
294+
description: >-
295+
Integration tests for wpt commands
296+
use:
297+
- wpt-base
298+
- trigger-pr
299+
- tox-python2
300+
command: ./tools/ci/ci_wpt.sh
301+
install:
302+
- libnss3-tools
303+
options:
304+
oom-killer: true
305+
browser:
306+
- firefox
307+
- chrome
308+
channel: experimental
309+
xvfb: true
310+
hosts: true
311+
schedule-if:
312+
run-job:
313+
- wpt_integration
314+
315+
- resources/ tests:
316+
description: >-
317+
Tests for testharness.js and other files in resources/
318+
use:
319+
- wpt-base
320+
- trigger-pr
321+
- tox-python2
322+
command: ./tools/ci/ci_resources_unittest.sh
323+
options:
324+
browser:
325+
- firefox
326+
xvfb: true
327+
hosts: true
328+
schedule-if:
329+
run-job:
330+
- resources_unittest
331+
332+
- infrastructure/ tests:
333+
description: >-
334+
Smoketests for wptrunner
335+
use:
336+
- wpt-base
337+
- trigger-pr
338+
- tox-python2
339+
command: ./tools/ci/ci_wptrunner_infrastructure.sh
340+
install:
341+
- libnss3-tools
342+
- libappindicator1
343+
- fonts-liberation
344+
options:
345+
oom-killer: true
346+
browser:
347+
- firefox
348+
- chrome
349+
channel: experimental
350+
xvfb: true
351+
hosts: false
352+
schedule-if:
353+
run-job:
354+
- wptrunner_infrastructure
File renamed without changes.

‎tools/ci/tc/tests/test_decision.py

+54
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,54 @@
1+
import mock
2+
import pytest
3+
4+
from tools.ci.tc import decision
5+
from six import iteritems
6+
7+
8+
@pytest.mark.parametrize("run_jobs,tasks,expected", [
    ([], {"task-no-schedule-if": {}}, ["task-no-schedule-if"]),
    ([], {"task-schedule-if-no-run-job": {"schedule-if": {}}}, []),
    (["job"],
     {"job-present": {"schedule-if": {"run-job": ["other-job", "job"]}}},
     ["job-present"]),
    (["job"], {"job-missing": {"schedule-if": {"run-job": ["other-job"]}}}, []),
    (["all"], {"job-all": {"schedule-if": {"run-job": ["other-job"]}}}, ["job-all"]),
    (["job"],
     {"job-1": {"schedule-if": {"run-job": ["job"]}},
      "job-2": {"schedule-if": {"run-job": ["other-job"]}}},
     ["job-1"]),
])
def test_filter_schedule_if(run_jobs, tasks, expected):
    """Tasks are kept when they have no schedule-if, when a run-job matches,
    or when the special "all" job is requested."""
    with mock.patch("tools.ci.tc.decision.get_run_jobs",
                    return_value=run_jobs) as get_run_jobs:
        assert (decision.filter_schedule_if({}, tasks) ==
                {name: tasks[name] for name in expected})
        # Bug fix: this line was a bare expression and so never checked
        # anything; it must be asserted.  get_run_jobs is lazy, so it is
        # consulted at most once regardless of the number of tasks.
        assert get_run_jobs.call_count in (0, 1)
27+
28+
29+
@pytest.mark.parametrize("msg,expected", [
    ("Some initial line\n\ntc-jobs:foo,bar", {"foo", "bar"}),
    ("Some initial line\n\ntc-jobs:foo, bar", {"foo", "bar"}),
    ("tc-jobs:foo, bar \nbaz", {"foo", "bar"}),
    ("tc-jobs:all", {"all"}),
    ("", set()),
    ("tc-jobs:foo\ntc-jobs:bar", {"foo"})])
@pytest.mark.parametrize("event", [
    {"commits": [{"message": "<message>"}]},
    {"pull_request": {"body": "<message>"}}
])
def test_extra_jobs_pr(msg, expected, event):
    """get_extra_jobs reads tc-jobs: directives from either the commit
    message or the pull-request body; only the first directive counts."""
    def fill_placeholder(node):
        """Deep-copy node, replacing any "<message>" string with msg."""
        if isinstance(node, dict):
            return {key: fill_placeholder(value)
                    for key, value in iteritems(node)}
        if isinstance(node, list):
            return [fill_placeholder(item) for item in node]
        if node == "<message>":
            return msg
        return node

    assert decision.get_extra_jobs(fill_placeholder(event)) == expected

‎tools/ci/tc/tests/test_taskgraph.py

+146
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,146 @@
1+
import pytest
2+
import yaml
3+
4+
from tools.ci.tc import taskgraph
5+
6+
@pytest.mark.parametrize("data, update_data, expected", [
    # Disjoint keys are merged.
    ({"a": 1}, {"b": 2}, {"a": 1, "b": 2}),
    # Scalars are overwritten by the update value.
    ({"a": 1}, {"a": 2}, {"a": 2}),
    # Lists are concatenated, not replaced.
    ({"a": [1]}, {"a": [2]}, {"a": [1, 2]}),
    # Nested dicts merge recursively.
    ({"a": {"b": 1, "c": 2}}, {"a": {"b": 2, "d": 3}}, {"a": {"b": 2, "c": 2, "d": 3}}),
    ({"a": {"b": [1]}}, {"a": {"b": [2]}}, {"a": {"b": [1, 2]}}),
    ]
)
def test_update_recursive(data, update_data, expected):
    """update_recursive mutates `data` in place: dicts recurse, lists append,
    scalars overwrite."""
    taskgraph.update_recursive(data, update_data)
    assert data == expected
17+
18+
19+
def test_use():
    """Components referenced via `use` are merged into the task: later
    components win for scalar values, lists from every source are
    concatenated, and values set directly on the task override all
    components."""
    data = """
components:
  component1:
    a: 1
    b: [1]
    c: "c"
  component2:
    a: 2
    b: [2]
    d: "d"
tasks:
  - task1:
      use:
        - component1
        - component2
      b: [3]
      c: "e"
"""
    expected = {
        "task1": {
            "a": 2,
            "b": [1, 2, 3],
            "c": "e",
            "d": "d",
            "name": "task1",
        }
    }
    assert taskgraph.load_tasks(yaml.safe_load(data)) == expected
48+
49+
50+
def test_var():
    """${vars.*} references in components are substituted from the task's
    own `vars`; the substituted value is stringified."""
    data = """
components:
  component1:
    a: ${vars.value}
tasks:
  - task1:
      use:
        - component1
      vars:
        value: 1
"""
    expected = {
        "task1": {
            "a": "1",
            "vars": {"value": 1},
            "name": "task1",
        }
    }
    assert taskgraph.load_tasks(yaml.safe_load(data)) == expected
70+
71+
72+
def test_map():
    """$map takes the cross product of its `for` items and `do` templates;
    each `for` item's data is merged into every `do` task and ${vars.*}
    substitutions are applied to names and values."""
    data = """
components: {}
tasks:
  - $map:
      for:
        - vars:
            a: 1
          b: [1]
        - vars:
            a: 2
          b: [2]
      do:
        - task1-${vars.a}:
            a: ${vars.a}
            b: [3]
        - task2-${vars.a}:
            a: ${vars.a}
            b: [4]
"""
    expected = {
        "task1-1": {"a": "1", "b": [1, 3], "vars": {"a": 1}, "name": "task1-1"},
        "task1-2": {"a": "2", "b": [2, 3], "vars": {"a": 2}, "name": "task1-2"},
        "task2-1": {"a": "1", "b": [1, 4], "vars": {"a": 1}, "name": "task2-1"},
        "task2-2": {"a": "2", "b": [2, 4], "vars": {"a": 2}, "name": "task2-2"},
    }
    assert taskgraph.load_tasks(yaml.safe_load(data)) == expected
121+
122+
def test_chunks():
    """A task with `chunks: N` expands into N tasks, each carrying a
    {"id": i, "total": N} chunks object usable in the name template."""
    data = """
components: {}
tasks:
  - task1:
      name: task1-${chunks.id}
      chunks: 2
"""
    expected = {
        "task1-%d" % chunk_id: {
            "name": "task1-%d" % chunk_id,
            "chunks": {"id": chunk_id, "total": 2},
        }
        for chunk_id in (1, 2)
    }
    assert taskgraph.load_tasks(yaml.safe_load(data)) == expected

‎tools/ci/tc/tests/test_valid.py

+73
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
import json
2+
import os
3+
4+
import jsone
5+
import mock
6+
import pytest
7+
import requests
8+
import sys
9+
import yaml
10+
from jsonschema import validate
11+
12+
here = os.path.dirname(__file__)
13+
root = os.path.abspath(os.path.join(here, "..", "..", "..", ".."))
14+
15+
16+
def data_path(filename):
    """Return the path of *filename* inside the sibling testdata directory."""
    testdata_dir = os.path.join(here, "..", "testdata")
    return os.path.join(testdata_dir, filename)
18+
19+
20+
@pytest.mark.xfail(sys.version_info.major == 2,
                   # The linked issue lives in the json-e repository, so the
                   # original reason wrongly blamed the "taskcluster library".
                   reason="json-e library has an encoding bug "
                   "https://github.com/taskcluster/json-e/issues/338")
def test_verify_taskcluster_yml():
    """Verify that the json-e in the .taskcluster.yml is valid"""
    with open(os.path.join(root, ".taskcluster.yml")) as f:
        template = yaml.safe_load(f)

    events = [("pr_event.json", "github-pull-request", "Pull Request"),
              ("master_push_event.json", "github-push", "Push to master")]

    # `title` is unused; kept so the events table stays self-describing.
    for filename, tasks_for, title in events:
        with open(data_path(filename)) as f:
            event = json.load(f)

        context = {"tasks_for": tasks_for,
                   "event": event,
                   "as_slugid": lambda x: x}

        # Rendering raises if the template is malformed; no output check needed.
        jsone.render(template, context)
40+
41+
42+
def test_verify_payload():
    """Verify that the decision task produces tasks with a valid payload"""
    from tools.ci.tc.decision import decide

    # Bug fix: raw.githubusercontent.com URLs have the form
    # /<owner>/<repo>/<ref>/<path>; the original URL contained a spurious
    # "/blob/" segment and therefore returned a 404 page, not the schema.
    create_task_schema = requests.get(
        "https://raw.githubusercontent.com/taskcluster/taskcluster/master/services/queue/schemas/v1/create-task-request.yml")
    create_task_schema = yaml.safe_load(create_task_schema.content)

    payload_schema = requests.get("https://raw.githubusercontent.com/taskcluster/docker-worker/master/schemas/v1/payload.json").json()

    jobs = ["lint",
            "manifest_upload",
            "resources_unittest",
            "tools_unittest",
            "wpt_integration",
            "wptrunner_infrastructure",
            "wptrunner_unittest"]

    for filename in ["pr_event.json", "master_push_event.json"]:
        with open(data_path(filename)) as f:
            event = json.load(f)

        with mock.patch("tools.ci.tc.decision.get_fetch_rev", return_value=(event["after"], None)):
            with mock.patch("tools.ci.tc.decision.get_run_jobs", return_value=set(jobs)):
                task_id_map = decide(event)
            for name, (task_id, task_data) in task_id_map.items():
                try:
                    validate(instance=task_data, schema=create_task_schema)
                    validate(instance=task_data["payload"], schema=payload_schema)
                except Exception:
                    print("Validation failed for task '%s':\n%s" % (name, json.dumps(task_data, indent=2)))
                    # Bug fix: bare `raise` preserves the original traceback;
                    # `raise e` rebinds it (and truncates it on Python 2).
                    raise

‎tools/ci/tests/test_run_tc.py

-33
This file was deleted.

‎tools/taskcluster/commands.json

-4
This file was deleted.

‎tools/taskcluster/verify.py

-37
This file was deleted.

‎tools/tox.ini

+4-1
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,11 @@ deps =
88
pytest-cov
99
mock
1010
hypothesis
11-
# `requests` is required by `pr_preview.py`
1211
requests
12+
taskcluster
13+
pyyaml
14+
json-e
15+
jsonschema
1316

1417
commands = pytest {posargs}
1518

‎tools/wpt/browser.py

+89-18
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,11 @@ class Browser(object):
4747
def __init__(self, logger):
4848
self.logger = logger
4949

50+
@abstractmethod
def download(self, dest=None, channel=None):
    """Download a package or installer for the browser"""
    # Abstract placeholder: concrete Browser subclasses must override this.
    return NotImplemented
54+
5055
@abstractmethod
5156
def install(self, dest=None):
5257
"""Install the browser."""
@@ -116,11 +121,19 @@ def platform_string_geckodriver(self):
116121

117122
return "%s%s" % (self.platform, bits)
118123

119-
def install(self, dest=None, channel="nightly"):
120-
"""Install Firefox."""
124+
def _get_dest(self, dest, channel):
125+
if dest is None:
126+
# os.getcwd() doesn't include the venv path
127+
dest = os.path.join(os.getcwd(), "_venv")
121128

122-
import mozinstall
129+
dest = os.path.join(dest, "browsers", channel)
123130

131+
if not os.path.exists(dest):
132+
os.makedirs(dest)
133+
134+
return dest
135+
136+
def download(self, dest=None, channel="nightly"):
124137
product = {
125138
"nightly": "firefox-nightly-latest-ssl",
126139
"beta": "firefox-beta-latest-ssl",
@@ -136,21 +149,15 @@ def install(self, dest=None, channel="nightly"):
136149
}
137150
os_key = (self.platform, uname[4])
138151

152+
if dest is None:
153+
dest = self._get_dest(None, channel)
154+
139155
if channel not in product:
140156
raise ValueError("Unrecognised release channel: %s" % channel)
141157

142158
if os_key not in os_builds:
143159
raise ValueError("Unsupported platform: %s %s" % os_key)
144160

145-
if dest is None:
146-
# os.getcwd() doesn't include the venv path
147-
dest = os.path.join(os.getcwd(), "_venv")
148-
149-
dest = os.path.join(dest, "browsers", channel)
150-
151-
if not os.path.exists(dest):
152-
os.makedirs(dest)
153-
154161
url = "https://download.mozilla.org/?product=%s&os=%s&lang=en-US" % (product[channel],
155162
os_builds[os_key])
156163
self.logger.info("Downloading Firefox from %s" % url)
@@ -175,6 +182,18 @@ def install(self, dest=None, channel="nightly"):
175182
with open(installer_path, "wb") as f:
176183
f.write(resp.content)
177184

185+
return installer_path
186+
187+
def install(self, dest=None, channel="nightly"):
188+
"""Install Firefox."""
189+
import mozinstall
190+
191+
dest = self._get_dest(dest, channel)
192+
193+
filename = os.path.basename(dest)
194+
195+
installer_path = self.download(dest, channel)
196+
178197
try:
179198
mozinstall.install(installer_path, dest)
180199
except mozinstall.mozinstall.InstallError:
@@ -422,7 +441,7 @@ class FirefoxAndroid(Browser):
422441
product = "firefox_android"
423442
requirements = "requirements_firefox.txt"
424443

425-
def install(self, dest=None, channel=None):
444+
def download(self, dest=None, channel=None):
426445
if dest is None:
427446
dest = os.pwd
428447

@@ -452,6 +471,9 @@ def install(self, dest=None, channel=None):
452471

453472
return apk_path
454473

474+
def install(self, dest=None, channel=None):
    """For Firefox Android, "installing" is just downloading the APK."""
    return self.download(dest=dest, channel=channel)
476+
455477
def install_prefs(self, binary, dest=None, channel=None):
456478
fx_browser = Firefox(self.logger)
457479
return fx_browser.install_prefs(binary, dest, channel)
@@ -478,6 +500,9 @@ class Chrome(Browser):
478500
product = "chrome"
479501
requirements = "requirements_chrome.txt"
480502

503+
def download(self, dest=None, channel=None):
504+
raise NotImplementedError
505+
481506
def install(self, dest=None, channel=None):
482507
raise NotImplementedError
483508

@@ -633,6 +658,9 @@ def __init__(self, logger):
633658
super(ChromeAndroidBase, self).__init__(logger)
634659
self.device_serial = None
635660

661+
def download(self, dest=None, channel=None):
662+
raise NotImplementedError
663+
636664
def install(self, dest=None, channel=None):
637665
raise NotImplementedError
638666

@@ -724,6 +752,9 @@ class ChromeiOS(Browser):
724752
product = "chrome_ios"
725753
requirements = "requirements_chrome_ios.txt"
726754

755+
def download(self, dest=None, channel=None):
756+
raise NotImplementedError
757+
727758
def install(self, dest=None, channel=None):
728759
raise NotImplementedError
729760

@@ -757,6 +788,9 @@ def binary(self):
757788
self.logger.warning("Unable to find the browser binary.")
758789
return None
759790

791+
def download(self, dest=None, channel=None):
792+
raise NotImplementedError
793+
760794
def install(self, dest=None, channel=None):
761795
raise NotImplementedError
762796

@@ -826,6 +860,9 @@ class EdgeChromium(Browser):
826860
edgedriver_name = "msedgedriver"
827861
requirements = "requirements_edge_chromium.txt"
828862

863+
def download(self, dest=None, channel=None):
864+
raise NotImplementedError
865+
829866
def install(self, dest=None, channel=None):
830867
raise NotImplementedError
831868

@@ -922,6 +959,9 @@ class Edge(Browser):
922959
product = "edge"
923960
requirements = "requirements_edge.txt"
924961

962+
def download(self, dest=None, channel=None):
963+
raise NotImplementedError
964+
925965
def install(self, dest=None, channel=None):
926966
raise NotImplementedError
927967

@@ -953,6 +993,9 @@ class InternetExplorer(Browser):
953993
product = "ie"
954994
requirements = "requirements_ie.txt"
955995

996+
def download(self, dest=None, channel=None):
997+
raise NotImplementedError
998+
956999
def install(self, dest=None, channel=None):
9571000
raise NotImplementedError
9581001

@@ -978,6 +1021,9 @@ class Safari(Browser):
9781021
product = "safari"
9791022
requirements = "requirements_safari.txt"
9801023

1024+
def download(self, dest=None, channel=None):
1025+
raise NotImplementedError
1026+
9811027
def install(self, dest=None, channel=None):
9821028
raise NotImplementedError
9831029

@@ -1037,17 +1083,33 @@ def platform_components(self):
10371083

10381084
return (platform, extension, decompress)
10391085

1040-
def install(self, dest=None, channel="nightly"):
1041-
"""Install latest Browser Engine."""
1086+
def _get(self, channel="nightly"):
    """Fetch the latest Servo archive for this platform (nightly only)."""
    if channel != "nightly":
        raise ValueError("Only nightly versions of Servo are available")

    plat, ext, _ = self.platform_components()
    download_url = "https://download.servo.org/nightly/%s/servo-latest%s" % (plat, ext)
    return get(download_url)
1093+
1094+
def download(self, dest=None, channel="nightly"):
    """Download the latest Servo nightly archive into *dest*.

    Returns the path of the downloaded archive (backward-compatible:
    the original returned None, which no caller relied on).
    """
    if dest is None:
        # Bug fix: os.pwd does not exist; use the working directory.
        dest = os.getcwd()

    # Bug fix: _get takes only a channel; the original passed dest as the
    # channel argument, which always raised ValueError.
    resp = self._get(channel)
    _, extension, _ = self.platform_components()

    archive_path = os.path.join(dest, "servo-latest%s" % (extension,))
    # Bug fix: resp.content is bytes, so the file must be opened in
    # binary mode ("w" fails on Python 3 and corrupts data elsewhere).
    with open(archive_path, "wb") as f:
        f.write(resp.content)
    return archive_path
1103+
1104+
def install(self, dest=None, channel="nightly"):
1105+
"""Install latest Browser Engine."""
1106+
if dest is None:
1107+
dest = os.pwd
1108+
1109+
_, _, decompress = self.platform_components()
10491110

1050-
decompress(get(url).raw, dest=dest)
1111+
resp = self._get(dest, channel)
1112+
decompress(resp.raw, dest=dest)
10511113
path = find_executable("servo", os.path.join(dest, "servo"))
10521114
st = os.stat(path)
10531115
os.chmod(path, st.st_mode | stat.S_IEXEC)
@@ -1083,6 +1145,9 @@ class Sauce(Browser):
10831145
product = "sauce"
10841146
requirements = "requirements_sauce.txt"
10851147

1148+
def download(self, dest=None, channel=None):
1149+
raise NotImplementedError
1150+
10861151
def install(self, dest=None, channel=None):
10871152
raise NotImplementedError
10881153

@@ -1105,6 +1170,9 @@ class WebKit(Browser):
11051170
product = "webkit"
11061171
requirements = "requirements_webkit.txt"
11071172

1173+
def download(self, dest=None, channel=None):
1174+
raise NotImplementedError
1175+
11081176
def install(self, dest=None, channel=None):
11091177
raise NotImplementedError
11101178

@@ -1168,6 +1236,9 @@ class Epiphany(Browser):
11681236
product = "epiphany"
11691237
requirements = "requirements_epiphany.txt"
11701238

1239+
def download(self, dest=None, channel=None):
1240+
raise NotImplementedError
1241+
11711242
def install(self, dest=None, channel=None):
11721243
raise NotImplementedError
11731244

‎tools/wpt/install.py

+10-7
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,8 @@ def get_parser():
4242
'the latest available development release. For WebDriver installs, '
4343
'we attempt to select an appropriate, compatible, version for the '
4444
'latest browser release on the selected channel.')
45+
parser.add_argument('--download-only', action="store_true",
46+
help="Download the selected component but don't install it")
4547
parser.add_argument('-d', '--destination',
4648
help='filesystem directory to place the component')
4749
return parser
@@ -73,21 +75,22 @@ def run(venv, **kwargs):
7375
raise argparse.ArgumentError(None,
7476
"No --destination argument, and no default for the environment")
7577

76-
install(browser, kwargs["component"], destination, channel)
78+
install(browser, kwargs["component"], destination, channel,
79+
download_only=kwargs["download_only"])
7780

7881

79-
def install(name, component, destination, channel="nightly", logger=None):
82+
def install(name, component, destination, channel="nightly", logger=None, download_only=False):
8083
if logger is None:
8184
import logging
8285
logger = logging.getLogger("install")
8386

84-
if component == 'webdriver':
85-
method = 'install_webdriver'
86-
else:
87-
method = 'install'
87+
prefix = "download" if download_only else "install"
88+
suffix = "_webdriver" if component == 'webdriver' else ""
89+
90+
method = prefix + suffix
8891

8992
subclass = getattr(browser, name.title())
9093
sys.stdout.write('Now installing %s %s...\n' % (name, component))
9194
path = getattr(subclass(logger), method)(dest=destination, channel=channel)
9295
if path:
93-
sys.stdout.write('Binary installed as %s\n' % (path,))
96+
sys.stdout.write('Binary %s as %s\n' % ("downloaded" if download_only else "installed", path,))

‎tools/wpt/paths

-1
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,4 @@ tools/docker/
33
tools/lint/
44
tools/manifest/
55
tools/serve/
6-
tools/taskcluster/
76
tools/wpt/

0 commit comments

Comments
 (0)
Please sign in to comment.