forked from sdgtt/jenkins-shared-library
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathGauntlet.groovy
559 lines (516 loc) · 16.9 KB
/
Gauntlet.groovy
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
package sdg
/** A map that holds all constants and data members that can be overridden when constructing */
gauntEnv
/**
 * Imitates a constructor.
 * Populates the gauntEnv configuration map with defaults plus the supplied
 * settings, then records which Jenkins agents are currently online.
 * @param dependencies - List of strings which are names of dependencies
 * @param hdlBranch - String of name of hdl branch to use for bootfile source
 * @param linuxBranch - String of name of linux branch to use for bootfile source
 * @param firmwareVersion - String of name of firmware version branch to use for pluto and m2k
 * @param bootfile_source - String location of bootfiles. Options: sftp, artifactory, http, local
 * @return constructed object
 */
def construct(List dependencies, hdlBranch, linuxBranch, firmwareVersion, bootfile_source) {
    def env = [:]
    // Caller-supplied settings
    env.dependencies = dependencies
    env.hdlBranch = hdlBranch
    env.linuxBranch = linuxBranch
    env.pyadiBranch = 'master'
    env.firmwareVersion = firmwareVersion
    env.bootfile_source = bootfile_source
    // Harness state, filled in during setup
    env.agents_online = ''
    env.debug = false
    env.board_map = [:]
    env.stages = []
    env.agents = []
    env.boards = []
    env.required_hardware = []
    // Docker configuration
    env.enable_docker = false
    env.docker_image = 'tfcollins/sw-ci:latest'
    env.docker_args = ['MATLAB', 'Vivado']
    env.enable_update_boot_pre_docker = false
    // Misc flags
    env.setup_called = false
    env.nebula_debug = false
    env.nebula_local_fs_source_root = '/var/lib/tftpboot'
    env.configure_called = false
    gauntEnv = env
    gauntEnv.agents_online = getOnlineAgents()
}
/**
 * Print the list of online agents discovered at construction time.
 */
def print_agents() {
    def online = gauntEnv.agents_online
    println(online)
}
/**
 * Query every online agent in parallel for its attached hardware and record
 * the results into gauntEnv: board_map (agent -> raw board string), plus the
 * flattened agents/boards lists (one entry per board).
 */
private def setup_agents() {
    def board_map = [:]
    // Query each agent for their connected hardware
    def jobs = [:]
    for (agent in gauntEnv.agents_online) {
        println('Agent: ' + agent)
        // Capture the loop variable locally so each parallel closure binds
        // its own agent name rather than the last loop value
        def agent_name = agent
        jobs[agent_name] = {
            node(agent_name) {
                stage('Query agents') {
                    setupAgent(['nebula','libiio'])
                    // Get necessary configuration for basic work
                    board = nebula('update-config board-config board-name')
                    board_map[agent_name] = board
                }
            }
        }
    }
    stage('Get Available\nTest Boards') {
        parallel jobs
    }
    gauntEnv.board_map = board_map
    // An agent may report several comma-separated boards; split them so
    // agents/boards become parallel lists with one entry per board
    (agents, boards) = splitMap(board_map,true)
    gauntEnv.agents = agents
    gauntEnv.boards = boards
}
/**
 * Look up a predefined stage closure from the built-in stage library.
 * Each returned closure takes the board name as its argument and wraps
 * one Jenkins stage.
 * @param stage_name String name of stage. One of:
 *        'UpdateBOOTFiles', 'CollectLogs', 'LinuxTests', 'PyADITests'
 * @return Closure of stage requested
 * @throws Exception if stage_name is not a known library stage
 */
def stage_library(String stage_name) {
    switch (stage_name) {
        case 'UpdateBOOTFiles':
            println('Added Stage UpdateBOOTFiles')
            cls = { String board ->
                try {
                    stage('Update BOOT Files') {
                        println("Board name passed: "+board)
                        // pluto boot files come from the firmware release branch;
                        // other boards pull from the configured bootfile source
                        if (board=="pluto")
                            nebula('dl.bootfiles --board-name=' + board + ' --branch=' + gauntEnv.firmwareVersion)
                        else
                            nebula('dl.bootfiles --board-name=' + board + ' --source-root="' + gauntEnv.nebula_local_fs_source_root + '" --source=' + gauntEnv.bootfile_source)
                        nebula('manager.update-boot-files --board-name=' + board + ' --folder=outs', full=false, show_log=true)
                        if (board=="pluto")
                            nebula('uart.set-local-nic-ip-from-usbdev')
                    }}
                catch(Exception ex) {
                    // A failed boot-file update leaves the workspace dirty;
                    // clean it before failing the pipeline
                    cleanWs();
                    throw new Exception('Update boot files failed');
                }
            };
            break
        case 'CollectLogs':
            println('Added Stage CollectLogs')
            // Placeholder stage; log collection is not implemented here yet
            cls = {
                stage('Collect Logs') {
                    echo 'Collect Logs'
                }
            };
            break
        case 'LinuxTests':
            println('Added Stage LinuxTests')
            cls = { String board ->
                try {
                    stage('Linux Tests') {
                        run_i('pip3 install pylibiio')
                        //def ip = nebula('uart.get-ip')
                        def ip = nebula('update-config network-config dutip --board-name='+board)
                        nebula("net.check-dmesg --ip='"+ip+"'")
                        nebula('driver.check-iio-devices --uri="ip:'+ip+'" --board-name='+board)
                    }
                }
                finally {
                    // Rename logs so artifacts from multiple boards don't collide
                    run_i("mv dmesg.log dmesg_" + board + ".log")
                    run_i("mv dmesg_err.log dmesg_" + board + "_err.log")
                    run_i("mv dmesg_warn.log dmesg_" + board + "_warn.log")
                    archiveArtifacts artifacts: '*.log', followSymlinks: false, allowEmptyArchive: true
                }
            };
            break
        case 'PyADITests':
            cls = { String board ->
                try
                {
                    stage('Run Python Tests') {
                        //def ip = nebula('uart.get-ip')
                        def ip = nebula('update-config network-config dutip --board-name='+board)
                        println('IP: ' + ip)
                        sh 'git clone -b "' + gauntEnv.pyadiBranch + '" https://github.com/analogdevicesinc/pyadi-iio.git'
                        dir('pyadi-iio')
                        {
                            run_i('pip3 install -r requirements.txt')
                            run_i('pip3 install -r requirements_dev.txt')
                            run_i('pip3 install pylibiio')
                            run_i('mkdir testxml')
                            // pytest markers use underscores, board names use dashes
                            board = board.replaceAll('-', '_')
                            cmd = "python3 -m pytest --junitxml=testxml/" + board + "_reports.xml --adi-hw-map -v -k 'not stress' -s --uri='ip:"+ip+"' -m " + board
                            def statusCode = sh script:cmd, returnStatus:true
                            if ((statusCode != 5) && (statusCode != 0)) // Ignore error 5 which means no tests were run
                                error "Error code: "+statusCode.toString()
                        }
                    }
                }
                finally
                {
                    // Always publish results, even when the test stage failed
                    junit testResults: 'pyadi-iio/testxml/*.xml', allowEmptyResults: true
                }
            }
            break
        default:
            throw new Exception('Unknown library stage: ' + stage_name)
    }
    return cls
}
/**
 * Add stage to agent pipeline.
 * @param cls Closure of stage(s). Should contain at least one stage closure.
 */
def add_stage(cls) {
    gauntEnv.stages << cls
}
/**
 * Log-collection pass run on master after testing.
 * Currently only reports which board/agent pairs were processed.
 */
private def collect_logs() {
    def board_count = gauntEnv.boards.size()
    node('master') {
        stage('Collect Logs') {
            for (int idx = 0; idx < board_count; idx++) {
                def agent_name = gauntEnv.agents[idx]
                def board_name = gauntEnv.boards[idx]
                println("Processing log for board: " + board_name + " (" + agent_name + ")")
            }
        }
    }
}
/**
 * Build and launch the parallel per-board pipelines.
 * For every agent/board pair, runs the configured stage closures either
 * directly on the node or inside the configured docker container
 * (gauntEnv.enable_docker). Optionally runs the UpdateBOOTFiles stage on
 * the host before entering docker (gauntEnv.enable_update_boot_pre_docker).
 */
private def run_agents() {
    // Start stages for each node with a board
    def jobs = [:]
    def num_boards = gauntEnv.boards.size()
    def docker_args = getDockerConfig(gauntEnv.docker_args)
    def enable_update_boot_pre_docker = gauntEnv.enable_update_boot_pre_docker
    def pre_docker_cls = stage_library("UpdateBOOTFiles")
    // Expose host config and device nodes inside the container
    docker_args.add("-v /etc/default:/default:ro")
    docker_args.add("-v /dev:/dev")
    if (docker_args instanceof List) {
        docker_args = docker_args.join(' ')
    }
    // Runs all configured stages for one board directly on the agent
    def oneNode = { agent, num_stages, stages, board ->
        def k
        node(agent) {
            for (k = 0; k < num_stages; k++) {
                println("Stage called for board: "+board)
                stages[k].call(board)
            }
            cleanWs();
        }
    }
    // Same as oneNode but wraps the stages in a docker container,
    // optionally running the boot-file update on the host first
    def oneNodeDocker = { agent, num_stages, stages, board, docker_image_name, enable_update_boot_pre_docker_flag, pre_docker_closure ->
        def k
        node(agent) {
            try {
                if (enable_update_boot_pre_docker_flag)
                    pre_docker_closure.call(board)
                docker.image(docker_image_name).inside(docker_args) {
                    try {
                        stage('Setup Docker') {
                            // Copy host configuration mounted at /default into the container
                            sh 'cp /default/nebula /etc/default/nebula'
                            sh 'cp /default/pyadi_test.yaml /etc/default/pyadi_test.yaml || true'
                            setupAgent(['libiio','nebula'], true);
                            // Above cleans up so we need to move to a valid folder
                            sh 'cd /tmp'
                        }
                        for (k = 0; k < num_stages; k++) {
                            println("Stage called for board: "+board)
                            stages[k].call(board)
                        }
                    }
                    finally {
                        println("Cleaning up after board stages");
                        cleanWs();
                    }
                }
            }
            finally {
                // Remove exited containers so they don't accumulate on the agent
                sh 'docker ps -q -f status=exited | xargs --no-run-if-empty docker rm'
            }
        }
    }
    for (i = 0; i < num_boards; i++) {
        // def locals capture per-iteration values for the job closures below
        def agent = gauntEnv.agents[i]
        def board = gauntEnv.boards[i]
        def stages = gauntEnv.stages
        def docker_image = gauntEnv.docker_image
        def num_stages = stages.size()
        println('Agent: ' + agent + ' Board: ' + board)
        println('Number of stages to run: ' + num_stages.toString())
        /*
        jobs[agent+"-"+board] = {
        node(agent) {
        for (k=0; k<num_stages; k++) {
        println("Running stage: "+k.toString());
        stages[k].call();
        }
        }
        }
        */
        if (gauntEnv.enable_docker)
            jobs[agent + '-' + board] = { oneNodeDocker(agent, num_stages, stages, board, docker_image, enable_update_boot_pre_docker, pre_docker_cls) };
        else
            jobs[agent + '-' + board] = { oneNode(agent, num_stages, stages, board) };
    }
    stage('Update and Test') {
        parallel jobs
    }
}
/**
 * Set list of required devices for test.
 * @param board_names list of strings of names of boards
 *        Strings must be associated with a board configuration name.
 *        For example: zynq-zc702-adv7511-ad9361-fmcomms2-3
 */
def set_required_hardware(List board_names) {
    assert board_names instanceof List
    gauntEnv.required_hardware = board_names
}
/**
 * Set nebula debug mode. Setting true will add show-log to nebula commands.
 * @param nebula_debug Boolean of debug mode
 */
def set_nebula_debug(nebula_debug) { gauntEnv.nebula_debug = nebula_debug }
/**
 * Set nebula downloader local_fs source_path.
 * @param nebula_local_fs_source_root String of path
 */
def set_nebula_local_fs_source_root(nebula_local_fs_source_root) {
    gauntEnv.nebula_local_fs_source_root = nebula_local_fs_source_root
}
/**
 * Set pyadi-iio branch name to use for testing.
 * @param pyadi_branch String of branch name
 */
def set_pyadi_branch(pyadi_branch) { gauntEnv.pyadiBranch = pyadi_branch }
/**
 * Set docker args passed to the docker container at runtime.
 * @param docker_args List of strings of args
 */
def set_docker_args(docker_args) { gauntEnv.docker_args = docker_args }
/**
 * Enable use of docker on agents during the job phases.
 * @param enable_docker boolean True will enable use of docker
 */
def set_enable_docker(enable_docker) { gauntEnv.enable_docker = enable_docker }
/**
 * Enable the boot-file update stage to run before docker is launched.
 * @param enable_update_boot_pre_docker boolean True will run the update boot
 *        stage on the host before docker is launched
 */
def set_enable_update_boot_pre_docker(enable_update_boot_pre_docker) {
    gauntEnv.enable_update_boot_pre_docker = enable_update_boot_pre_docker
}
/**
 * Verify every board in gauntEnv.required_hardware was discovered during
 * setup_agents, failing the pipeline if one is missing, then filter the
 * agents/boards lists down to only the required boards.
 */
private def check_required_hardware() {
    def s = gauntEnv.required_hardware.size()
    def b = gauntEnv.boards.size()
    def filtered_board_list = []
    def filtered_agent_list = []
    println("Found boards:")
    for (k = 0; k < b; k++) {
        println("Agent: "+gauntEnv.agents[k]+" Board: "+gauntEnv.boards[k])
    }
    for (i = 0; i < s; i++) {
        if (! gauntEnv.boards.contains(gauntEnv.required_hardware[i]) ) {
            error(gauntEnv.required_hardware[i] + ' not found in harness. Failing pipeline')
        }
        // BUG FIX: previously indexed boards/agents with the loop index of
        // required_hardware, pairing the wrong agent with a board whenever
        // the two lists are ordered differently. Look up the board's actual
        // position so each board keeps the agent that owns it.
        def indx = gauntEnv.boards.indexOf(gauntEnv.required_hardware[i])
        filtered_board_list.add(gauntEnv.boards[indx])
        filtered_agent_list.add(gauntEnv.agents[indx])
    }
    // Update to filtered lists (no required hardware means test everything found)
    if (s > 0) {
        gauntEnv.boards = filtered_board_list
        gauntEnv.agents = filtered_agent_list
    }
}
/**
 * Main method for starting pipeline once configuration is complete.
 * Once called all agents are queried for attached boards and parallel stages
 * will be generated and mapped to relevant agents.
 */
def run_stages() {
    // Discover agents/boards, validate against the required set,
    // run the configured stages in parallel, then gather logs
    setup_agents()
    check_required_hardware()
    run_agents()
    collect_logs()
}
// Private methods
/**
 * Flatten a map into parallel key/value lists.
 * With do_split=true each value is treated as a comma-separated string and
 * expanded into one (key, item) pair per item, with spaces stripped.
 * @param map Map to flatten
 * @param do_split boolean, split comma-separated values when true
 * @return two-element list: [keys, values]
 */
@NonCPS
private def splitMap(map, do_split=false) {
    def keys = []
    def values = []
    map.each { key, value ->
        if (do_split) {
            value.split(',').each { item ->
                keys << key
                values << item.replaceAll(' ', '')
            }
        }
        else {
            keys << key
            values << value
        }
    }
    return [keys, values]
}
/**
 * Collect the names of all online Jenkins agents, excluding the
 * 'alpine' node. Prints the resulting list before returning it.
 * @return List of agent names that are currently online
 */
@NonCPS
private def getOnlineAgents() {
    def online_agents = []
    for (agent in Jenkins.instance.getNodes()) {
        def computer = agent.computer
        // skip the excluded node and anything currently offline
        if (computer.name != 'alpine' && !computer.offline) {
            online_agents << computer.name
        }
    }
    println(online_agents)
    return online_agents
}
/**
 * Detect the operating system of the current agent.
 * @return 'Windows', 'Macos', or 'Linux'
 */
private def checkOs() {
    if (!isUnix()) {
        return 'Windows'
    }
    // On Unix, distinguish macOS from Linux via uname
    def uname = sh script: 'uname', returnStdout: true
    return uname.startsWith('Darwin') ? 'Macos' : 'Linux'
}
/**
 * Run a nebula CLI command on the current agent and return its output.
 * @param cmd String nebula subcommand and arguments
 * @param full boolean, return the raw trimmed output when true; otherwise
 *        drop the first output line and any WARNING lines
 * @param show_log boolean, prepend 'show-log' to the command (forced on
 *        when gauntEnv.nebula_debug is set)
 * @return String command output, filtered unless full=true
 */
def nebula(cmd, full=false, show_log=false) {
    if (gauntEnv.nebula_debug) {
        show_log = true
    }
    if (show_log) {
        cmd = 'show-log ' + cmd
    }
    cmd = 'nebula ' + cmd
    if (checkOs() == 'Windows') {
        script_out = bat(script: cmd, returnStdout: true).trim()
    }
    else {
        script_out = sh(script: cmd, returnStdout: true).trim()
    }
    // BUG FIX: previously full=true fell through to 'return out' with 'out'
    // never assigned, throwing MissingPropertyException. Return the raw
    // output directly when full output is requested.
    if (full) {
        return script_out
    }
    // Remove lines: drop the first line and any WARNING lines
    lines = script_out.split('\n')
    if (lines.size() == 1) {
        return script_out
    }
    out = ''
    added = 0
    for (i = 1; i < lines.size(); i++) {
        if (lines[i].contains('WARNING')) {
            continue
        }
        if (added > 0) {
            out = out + '\n'
        }
        out = out + lines[i]
        added = added + 1
    }
    return out
}
/**
 * Install the nebula tool from source on the current agent.
 * Windows uses pip/python; Unix uses pip3/python3 and first uninstalls any
 * existing nebula so the fresh clone takes effect.
 */
private def install_nebula() {
    if (checkOs() == 'Windows') {
        bat 'git clone https://github.com/tagoylo/nebula.git'
        dir('nebula')
        {
            bat 'pip install -r requirements.txt'
            bat 'python setup.py install'
        }
    }
    else {
        // '|| true' keeps the step green when nebula was not installed yet
        sh 'pip3 uninstall nebula -y || true'
        sh 'git clone https://github.com/tagoylo/nebula.git'
        dir('nebula')
        {
            sh 'pip3 install -r requirements.txt'
            sh 'python3 setup.py install'
        }
    }
}
/**
 * Build and install libiio from source on the current agent.
 * Windows builds with the cmake generator defaults; Unix pins the v0.19
 * release branch and installs via make.
 */
private def install_libiio() {
    if (checkOs() == 'Windows') {
        bat 'git clone https://github.com/analogdevicesinc/libiio.git'
        dir('libiio')
        {
            bat 'mkdir build'
            // BUG FIX: previously used bat('build') { ... }, which ran a
            // command named 'build' instead of entering the build directory
            // (compare the Unix branch below)
            dir('build')
            {
                //sh 'cmake .. -DPYTHON_BINDINGS=ON'
                bat 'cmake ..'
                // BUG FIX: 'cmake --build' has no '--install' flag; the
                // documented way to install is the install target
                bat 'cmake --build . --config Release --target install'
            }
        }
    }
    else {
        sh 'git clone -b v0.19 https://github.com/analogdevicesinc/libiio.git'
        dir('libiio')
        {
            sh 'mkdir build'
            dir('build')
            {
                //sh 'cmake .. -DPYTHON_BINDINGS=ON'
                sh 'cmake ..'
                sh 'make'
                sh 'make install'
                // refresh the shared-library cache so the new lib is found
                sh 'ldconfig'
            }
        }
    }
}
/**
 * Install the named dependencies on the current agent.
 * @param deps List of dependency names; 'nebula' and 'libiio' are supported
 * @param skip_cleanup boolean, skip the workspace cleanup when true
 */
private def setupAgent(deps, skip_cleanup = false) {
    try {
        for (dep in deps) {
            println(dep)
            if (dep == 'nebula') {
                install_nebula()
            }
            if (dep == 'libiio') {
                install_libiio()
            }
        }
    }
    finally {
        // Always clean the workspace unless the caller opted out
        if (!skip_cleanup)
            cleanWs()
    }
}
/**
 * Run a command with the shell step appropriate for the host OS
 * (bat on Windows, sh elsewhere).
 * @param cmd String command to execute
 */
private def run_i(cmd) {
    if (checkOs() != 'Windows') {
        sh cmd
    }
    else {
        bat cmd
    }
}