#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import sys # arg
import re # reg
import logging
import os
import shutil
import subprocess
import time
import pyshark
import threading
from multiprocessing import Process, Lock, SimpleQueue
from zipfile import ZipFile
#-----------------------------------------------------------
# OAI Testing modules
#-----------------------------------------------------------
import cls_cluster as OC
import cls_cmd
import sshconnection as SSH
import helpreadme as HELP
import constants as CONST
import cls_oaicitest
#-----------------------------------------------------------
# Helper functions used here and in other classes
# (e.g., cls_cluster.py)
#-----------------------------------------------------------
IMAGES = ['oai-enb', 'oai-lte-ru', 'oai-lte-ue', 'oai-gnb', 'oai-nr-cuup', 'oai-gnb-aw2s', 'oai-nr-ue']
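# CreateWorkspace: recreate a clean git workspace at sourcePath on the target server
# (clone the requested branch, checkout ranCommitID, and optionally merge the target
# branch when testing a merge request).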
def CreateWorkspace(sshSession, sourcePath, ranRepository, ranCommitID, ranTargetBranch, ranAllowMerge):
if ranCommitID == '':
logging.error('need ranCommitID in CreateWorkspace()')
sys.exit('Insufficient Parameter in CreateWorkspace()')
sshSession.command(f'rm -rf {sourcePath}', '\$', 10)
sshSession.command('mkdir -p ' + sourcePath, '\$', 5)
sshSession.command('cd ' + sourcePath, '\$', 5)
# Recent versions of git (>2.20?) should handle a missing .git extension without problems
if ranTargetBranch == 'null':
ranTargetBranch = 'develop'
baseBranch = re.sub('origin/', '', ranTargetBranch)
sshSession.command(f'git clone --filter=blob:none -n -b {baseBranch} {ranRepository} .', '\$', 60)
if sshSession.getBefore().count('error') > 0:
sys.exit('error during clone')
sshSession.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
sshSession.command('git config user.name "OAI Jenkins"', '\$', 5)
sshSession.command('mkdir -p cmake_targets/log', '\$', 5)
# if the commit ID is provided use it to point to it
sshSession.command(f'git checkout -f {ranCommitID}', '\$', 30)
if sshSession.getBefore().count(f'HEAD is now at {ranCommitID[:6]}') != 1:
sshSession.command('git log --oneline | head -n5', '\$', 5)
logging.warning(f'problems during checkout, is at: {sshSession.getBefore()}')
else:
logging.debug('successful checkout')
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already have been checked earlier
if ranAllowMerge:
if ranTargetBranch == '':
ranTargetBranch = 'develop'
logging.debug(f'Merging with the target branch: {ranTargetBranch}')
sshSession.command(f'git merge --ff origin/{ranTargetBranch} -m "Temporary merge for CI"', '\$', 30)
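# ImageTagToUse: build the full image reference '<imageName>:<tag>' from the commit ID;
# merge requests are tagged '<branch>-<shortCommit>', push events 'develop-<shortCommit>',
# e.g. ImageTagToUse('oai-gnb', '1234567890abcdef', 'develop', False) -> 'oai-gnb:develop-12345678'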
def ImageTagToUse(imageName, ranCommitID, ranBranch, ranAllowMerge):
shortCommit = ranCommitID[0:8]
if ranAllowMerge:
tagToUse = f'{ranBranch}-{shortCommit}'
else:
tagToUse = f'develop-{shortCommit}'
fullTag = f'{imageName}:{tagToUse}'
return fullTag
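# CopyLogsToExecutor: zip the cmake_targets/log directory on the build server under the
# given log_name, copy the archive back to the executor and extract it locally.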
def CopyLogsToExecutor(cmd, sourcePath, log_name):
cmd.cd(f'{sourcePath}/cmake_targets')
cmd.run(f'rm -f {log_name}.zip')
cmd.run(f'mkdir -p {log_name}')
cmd.run(f'mv log/* {log_name}')
cmd.run(f'zip -r -qq {log_name}.zip {log_name}')
# copy zip to executor for analysis
if (os.path.isfile(f'./{log_name}.zip')):
os.remove(f'./{log_name}.zip')
if (os.path.isdir(f'./{log_name}')):
shutil.rmtree(f'./{log_name}')
cmd.copyin(f'{sourcePath}/cmake_targets/{log_name}.zip', f'./{log_name}.zip')
cmd.run(f'rm -f {log_name}.zip')
ZipFile(f'{log_name}.zip').extractall('.')
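# AnalyzeBuildLogs: walk the per-image build logs under buildRoot, count error/warning
# lines in every .txt sub-log, and check each <image>.log for a successful tag/commit of
# the target image. Returns a dict of {image: {logfile: {errors, warnings, status}}}.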
def AnalyzeBuildLogs(buildRoot, images, globalStatus):
collectInfo = {}
for image in images:
files = {}
file_list = [f for f in os.listdir(f'{buildRoot}/{image}') if os.path.isfile(os.path.join(f'{buildRoot}/{image}', f)) and f.endswith('.txt')]
# Analyze the "sub-logs" of every target image
for fil in file_list:
errorandwarnings = {}
warningsNo = 0
errorsNo = 0
with open(f'{buildRoot}/{image}/{fil}', mode='r') as inputfile:
for line in inputfile:
result = re.search(' ERROR ', str(line))
if result is not None:
errorsNo += 1
result = re.search(' error:', str(line))
if result is not None:
errorsNo += 1
result = re.search(' WARNING ', str(line))
if result is not None:
warningsNo += 1
result = re.search(' warning:', str(line))
if result is not None:
warningsNo += 1
errorandwarnings['errors'] = errorsNo
errorandwarnings['warnings'] = warningsNo
errorandwarnings['status'] = globalStatus
files[fil] = errorandwarnings
# Analyze the target image
if os.path.isfile(f'{buildRoot}/{image}.log'):
errorandwarnings = {}
committed = False
tagged = False
with open(f'{buildRoot}/{image}.log', mode='r') as inputfile:
startOfTargetImageCreation = False # check for tagged/committed only after image created
for line in inputfile:
result = re.search(f'FROM .* [aA][sS] {image}$', str(line))
if result is not None:
startOfTargetImageCreation = True
if startOfTargetImageCreation:
lineHasTag = re.search(f'Successfully tagged {image}:', str(line)) is not None
tagged = tagged or lineHasTag
# the OpenShift Cluster builder prepends image registry URL
lineHasCommit = re.search(f'COMMIT [a-zA-Z0-9\.:/\-]*{image}', str(line)) is not None
committed = committed or lineHasCommit
errorandwarnings['errors'] = 0 if committed or tagged else 1
errorandwarnings['warnings'] = 0
errorandwarnings['status'] = committed or tagged
files['Target Image Creation'] = errorandwarnings
collectInfo[image] = files
return collectInfo
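# AnalyzeIperf: parse the requested bandwidth/duration from the iperf CLI options and the
# measured bitrate, jitter, packet loss and duration from the client/server reports.
# Returns (True, message) when the run meets the thresholds (>=90% of requested bitrate,
# <=5% packet loss, full duration), (False, message) otherwise.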
def AnalyzeIperf(cliOptions, clientReport, serverReport):
req_bw = 1.0 # default iperf throughput, in Mbps
result = re.search('-b *(?P<iperf_bandwidth>[0-9\.]+)(?P<magnitude>[kKMG])', cliOptions)
if result is not None:
req_bw = float(result.group('iperf_bandwidth'))
magn = result.group('magnitude')
if magn == "k" or magn == "K":
req_bw /= 1000
elif magn == "G":
req_bw *= 1000
req_dur = 10 # default iperf send duration
result = re.search('-t *(?P<duration>[0-9]+)', cliOptions)
if result is not None:
req_dur = int(result.group('duration'))
reportLine = None
# find server report in client status
clientReportLines = clientReport.split('\n')
for l in range(len(clientReportLines)):
res = re.search('read failed: Connection refused', clientReportLines[l])
if res is not None:
message = 'iperf connection refused by server!'
logging.error(f'\u001B[1;37;41mIperf Test FAIL: {message}\u001B[0m')
return (False, message)
res = re.search('Server Report:', clientReportLines[l])
if res is not None and l + 1 < len(clientReportLines):
reportLine = clientReportLines[l+1]
logging.debug(f'found server report: "{reportLine}"')
statusTemplate = '(?:|\[ *\d+\].*) +0\.0-\s*(?P<duration>[0-9\.]+) +sec +[0-9\.]+ [kKMG]Bytes +(?P<bitrate>[0-9\.]+) (?P<magnitude>[kKMG])bits\/sec +(?P<jitter>[0-9\.]+) ms +(\d+\/ *\d+) +(\((?P<packetloss>[0-9\.]+)%\))'
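# statusTemplate is expected to match a typical iperf2 UDP server-report line such as
# '[  3]  0.0-10.0 sec  1.25 MBytes  1.05 Mbits/sec   0.123 ms    0/  893 (0%)'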
# if we do not find a server report in the client logs, check the server logs
# and use the last line which is typically close/identical to server report
if reportLine is None:
for l in serverReport.split('\n'):
res = re.search(statusTemplate, l)
if res is not None:
reportLine = l
if reportLine is None:
logging.warning('no report in server status found!')
return (False, 'could not parse iperf logs')
logging.debug(f'found client status: {reportLine}')
result = re.search(statusTemplate, reportLine)
if result is None:
logging.error('could not analyze report from statusTemplate')
return (False, 'could not parse iperf logs')
duration = float(result.group('duration'))
bitrate = float(result.group('bitrate'))
magn = result.group('magnitude')
if magn == "k" or magn == "K":
bitrate /= 1000
elif magn == "G": # we assume bitrate in Mbps, therefore it must be G now
bitrate *= 1000
jitter = float(result.group('jitter'))
packetloss = float(result.group('packetloss'))
logging.debug('\u001B[1;37;44m iperf result \u001B[0m')
msg = f'Req Bitrate: {req_bw}'
logging.debug(f'\u001B[1;34m{msg}\u001B[0m')
br_loss = bitrate/req_bw
bmsg = f'Bitrate : {bitrate} (perf {br_loss})'
logging.debug(f'\u001B[1;34m{bmsg}\u001B[0m')
msg += '\n' + bmsg
if br_loss < 0.9:
msg += '\nBitrate performance too low (<90%)'
logging.debug(f'\u001B[1;37;41mBitrate performance too low (<90%)\u001B[0m')
return (False, msg)
plmsg = f'Packet Loss: {packetloss}%'
logging.debug(f'\u001B[1;34m{plmsg}\u001B[0m')
msg += '\n' + plmsg
if packetloss > 5.0:
msg += '\nPacket Loss too high!'
logging.debug(f'\u001B[1;37;41mPacket Loss too high \u001B[0m')
return (False, msg)
dmsg = f'Duration : {duration} (req {req_dur})'
logging.debug(f'\u001B[1;34m{dmsg}\u001B[0m')
msg += '\n' + dmsg
if duration < float(req_dur):
msg += '\nDuration of iperf too short!'
logging.debug(f'\u001B[1;37;41mDuration of iperf too short\u001B[0m')
return (False, msg)
jmsg = f'Jitter : {jitter}'
logging.debug(f'\u001B[1;34m{jmsg}\u001B[0m')
msg += '\n' + jmsg
return (True, msg)
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class Containerize():
def __init__(self):
self.ranRepository = ''
self.ranBranch = ''
self.ranAllowMerge = False
self.ranCommitID = ''
self.ranTargetBranch = ''
self.eNBIPAddress = ''
self.eNBUserName = ''
self.eNBPassword = ''
self.eNBSourceCodePath = ''
self.eNB1IPAddress = ''
self.eNB1UserName = ''
self.eNB1Password = ''
self.eNB1SourceCodePath = ''
self.eNB2IPAddress = ''
self.eNB2UserName = ''
self.eNB2Password = ''
self.eNB2SourceCodePath = ''
self.forcedWorkspaceCleanup = False
self.imageKind = ''
self.proxyCommit = None
self.eNB_instance = 0
self.eNB_serverId = ['', '', '']
self.yamlPath = ['', '', '']
self.services = ['', '', '']
self.nb_healthy = [0, 0, 0]
self.exitStatus = 0
self.eNB_logFile = ['', '', '']
self.testCase_id = ''
self.cli = ''
self.cliBuildOptions = ''
self.dockerfileprefix = ''
self.host = ''
self.deployedContainers = []
self.tsharkStarted = False
self.displayedNewTags = False
self.pingContName = ''
self.pingOptions = ''
self.pingLossThreshold = ''
self.svrContName = ''
self.svrOptions = ''
self.cliContName = ''
self.cliOptions = ''
self.imageToCopy = ''
self.registrySvrId = ''
self.testSvrId = ''
self.imageToPull = []
#checkers from xml
self.ran_checkers={}
#-----------------------------------------------------------
# Container management functions
#-----------------------------------------------------------
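# BuildImage: on the selected build server, detect the host OS (docker on Ubuntu,
# podman on Red Hat), rebuild ran-base if needed, build ran-build plus the requested
# target images, collect the build logs and report the result in the HTML page.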
def BuildImage(self, HTML):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('Building on server: ' + lIpAddr)
cmd = cls_cmd.RemoteCmd(lIpAddr)
# Check the host OS (via hostnamectl) to select the container CLI and Dockerfile prefix
cmd.run('hostnamectl')
result = re.search('Ubuntu|Red Hat', cmd.getBefore())
self.host = result.group(0)
if self.host == 'Ubuntu':
self.cli = 'docker'
self.dockerfileprefix = '.ubuntu20'
self.cliBuildOptions = '--no-cache'
elif self.host == 'Red Hat':
self.cli = 'sudo podman'
self.dockerfileprefix = '.rhel9'
self.cliBuildOptions = '--no-cache --disable-compression'
# we always build the ran-build image with all targets
imageNames = [('ran-build', 'build')]
result = re.search('eNB', self.imageKind)
# Creating a tuple with the imageName and the Dockerfile prefix pattern on obelix
if result is not None:
imageNames.append(('oai-enb', 'eNB'))
else:
result = re.search('gNB', self.imageKind)
if result is not None:
imageNames.append(('oai-gnb', 'gNB'))
else:
result = re.search('all', self.imageKind)
if result is not None:
imageNames.append(('oai-enb', 'eNB'))
imageNames.append(('oai-gnb', 'gNB'))
imageNames.append(('oai-nr-cuup', 'nr-cuup'))
imageNames.append(('oai-lte-ue', 'lteUE'))
imageNames.append(('oai-nr-ue', 'nrUE'))
if self.host == 'Red Hat':
imageNames.append(('oai-physim', 'phySim'))
if self.host == 'Ubuntu':
imageNames.append(('oai-lte-ru', 'lteRU'))
# Workaround for some servers: we need to completely erase the workspace
if self.forcedWorkspaceCleanup:
cmd.run(f'sudo -S rm -Rf {lSourcePath}')
self.testCase_id = HTML.testCase_id
CreateWorkspace(cmd, lSourcePath, self.ranRepository, self.ranCommitID, self.ranTargetBranch, self.ranAllowMerge)
# if asterix, copy the entitlement and subscription manager configurations
if self.host == 'Red Hat':
cmd.run('mkdir -p ./etc-pki-entitlement ./rhsm-conf ./rhsm-ca')
cmd.run('cp /etc/rhsm/rhsm.conf ./rhsm-conf/')
cmd.run('cp /etc/rhsm/ca/redhat-uep.pem ./rhsm-ca/')
cmd.run('cp /etc/pki/entitlement/*.pem ./etc-pki-entitlement/')
baseImage = 'ran-base'
baseTag = 'develop'
forceBaseImageBuild = False
imageTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
if self.ranTargetBranch == 'develop':
cmd.run(f'git diff HEAD..origin/develop -- cmake_targets/build_oai cmake_targets/tools/build_helper docker/Dockerfile.base{self.dockerfileprefix} | grep --colour=never -i INDEX')
result = re.search('index', cmd.getBefore())
if result is not None:
forceBaseImageBuild = True
baseTag = 'ci-temp'
else:
forceBaseImageBuild = True
# Let's remove any previous run artifacts if still there
cmd.run(f"{self.cli} image prune --force")
if forceBaseImageBuild:
cmd.run(f"{self.cli} image rm {baseImage}:{baseTag}")
for image,pattern in imageNames:
cmd.run(f"{self.cli} image rm {image}:{imageTag}")
# Build the base image only on Push Events (not on Merge Requests)
# or when the base image Dockerfile is being modified.
if forceBaseImageBuild:
cmd.run(f"{self.cli} build {self.cliBuildOptions} --target {baseImage} --tag {baseImage}:{baseTag} --file docker/Dockerfile.base{self.dockerfileprefix} . &> cmake_targets/log/ran-base.log", timeout=1600)
# First verify if the base image was properly created.
ret = cmd.run(f"{self.cli} image inspect --format=\'Size = {{{{.Size}}}} bytes\' {baseImage}:{baseTag}")
allImagesSize = {}
if ret.returncode != 0:
logging.error('\u001B[1m Could not properly build ran-base\u001B[0m')
# Remove the last exited (failed) build container, if any
cmd.run(f"{self.cli} ps --quiet --filter \"status=exited\" -n1 | xargs {self.cli} rm -f")
cmd.run(f"{self.cli} image prune --force")
cmd.close()
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', cmd.getBefore())
if result is not None:
size = float(result.group("size")) / 1000000
imageSizeStr = f'{size:.1f}'
logging.debug(f'\u001B[1m ran-base size is {imageSizeStr} Mbytes\u001B[0m')
allImagesSize['ran-base'] = f'{imageSizeStr} Mbytes'
else:
logging.debug('ran-base size is unknown')
# Recover build logs, for the moment only possible when build is successful
cmd.run(f"{self.cli} create --name test {baseImage}:{baseTag}")
cmd.run("mkdir -p cmake_targets/log/ran-base")
cmd.run(f"{self.cli} cp test:/oai-ran/cmake_targets/log/. cmake_targets/log/ran-base")
cmd.run(f"{self.cli} rm -f test")
# Build the target image(s)
status = True
attemptedImages = ['ran-base']
for image,pattern in imageNames:
attemptedImages += [image]
# the archived Dockerfiles have "ran-base:latest" as base image
# we need to update them with proper tag
cmd.run(f'sed -i -e "s#{baseImage}:latest#{baseImage}:{baseTag}#" docker/Dockerfile.{pattern}{self.dockerfileprefix}')
if image != 'ran-build':
cmd.run(f'sed -i -e "s#ran-build:latest#ran-build:{imageTag}#" docker/Dockerfile.{pattern}{self.dockerfileprefix}')
cmd.run(f'{self.cli} build {self.cliBuildOptions} --target {image} --tag {image}:{imageTag} --file docker/Dockerfile.{pattern}{self.dockerfileprefix} . > cmake_targets/log/{image}.log 2>&1', timeout=1200)
# split the log
cmd.run(f"mkdir -p cmake_targets/log/{image}")
cmd.run(f"python3 ci-scripts/docker_log_split.py --logfilename=cmake_targets/log/{image}.log")
# check the status of the build
ret = cmd.run(f"{self.cli} image inspect --format=\'Size = {{{{.Size}}}} bytes\' {image}:{imageTag}")
if ret.returncode != 0:
logging.error('\u001B[1m Could not properly build ' + image + '\u001B[0m')
status = False
# Here we should check if the last container corresponds to a failed command and destroy it
cmd.run(f"{self.cli} ps --quiet --filter \"status=exited\" -n1 | xargs {self.cli} rm -f")
allImagesSize[image] = 'N/A -- Build Failed'
break
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', cmd.getBefore())
if result is not None:
size = float(result.group("size")) / 1000000 # convert to MB
imageSizeStr = f'{size:.1f}'
logging.debug(f'\u001B[1m {image} size is {imageSizeStr} Mbytes\u001B[0m')
allImagesSize[image] = f'{imageSizeStr} Mbytes'
else:
logging.debug(f'{image} size is unknown')
allImagesSize[image] = 'unknown'
# Now pruning dangling images in between target builds
cmd.run(f"{self.cli} image prune --force")
# Remove all intermediate build images and clean up
if self.ranAllowMerge and forceBaseImageBuild:
cmd.run(f"{self.cli} image rm {baseImage}:{baseTag}")
cmd.run(f"{self.cli} image rm ran-build:{imageTag}")
cmd.run(f"{self.cli} volume prune --force")
# create a zip with all logs
build_log_name = f'build_log_{self.testCase_id}'
CopyLogsToExecutor(cmd, lSourcePath, build_log_name)
cmd.close()
# Analyze the logs
collectInfo = AnalyzeBuildLogs(build_log_name, attemptedImages, status)
if status:
logging.info('\u001B[1m Building OAI Image(s) Pass\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'OK', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, allImagesSize)
else:
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, allImagesSize)
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
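# BuildProxy: build the EpiSci L2sim multi-UE proxy image (oai-lte-multi-ue-proxy) from
# its own repository at self.proxyCommit, reusing CreateWorkspace with temporarily
# swapped repository settings; only possible on an Ubuntu docker host.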
def BuildProxy(self, HTML):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
if self.proxyCommit is None:
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter (need proxyCommit for proxy build)')
logging.debug('Building on server: ' + lIpAddr)
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
# Check that we are on Ubuntu
mySSH.command('hostnamectl', '\$', 5)
result = re.search('Ubuntu', mySSH.getBefore())
self.host = result.group(0)
if self.host != 'Ubuntu':
logging.error('\u001B[1m Can build proxy only on Ubuntu server\u001B[0m')
mySSH.close()
sys.exit(1)
self.cli = 'docker'
self.cliBuildOptions = '--no-cache'
# Workaround for some servers: we need to completely erase the workspace
if self.forcedWorkspaceCleanup:
mySSH.command('echo ' + lPassWord + ' | sudo -S rm -Rf ' + lSourcePath, '\$', 15)
oldRanCommidID = self.ranCommitID
oldRanRepository = self.ranRepository
oldRanAllowMerge = self.ranAllowMerge
oldRanTargetBranch = self.ranTargetBranch
self.ranCommitID = self.proxyCommit
self.ranRepository = 'https://github.com/EpiSci/oai-lte-5g-multi-ue-proxy.git'
self.ranAllowMerge = False
self.ranTargetBranch = 'master'
CreateWorkspace(mySSH, lSourcePath, self.ranRepository, self.ranCommitID, self.ranTargetBranch, self.ranAllowMerge)
# to prevent accidentally overwriting data that might be used later
self.ranCommitID = oldRanCommidID
self.ranRepository = oldRanRepository
self.ranAllowMerge = oldRanAllowMerge
self.ranTargetBranch = oldRanTargetBranch
# Let's remove any previous run artifacts if still there
mySSH.command(self.cli + ' image prune --force', '\$', 30)
# Remove any previous proxy image
mySSH.command(self.cli + ' image rm oai-lte-multi-ue-proxy:latest || true', '\$', 30)
tag = self.proxyCommit
logging.debug('building L2sim proxy image for tag ' + tag)
# check if the corresponding proxy image with tag exists. If not, build it
mySSH.command(self.cli + ' image inspect --format=\'Size = {{.Size}} bytes\' proxy:' + tag, '\$', 5)
buildProxy = mySSH.getBefore().count('o such image') != 0
if buildProxy:
mySSH.command(self.cli + ' build ' + self.cliBuildOptions + ' --target oai-lte-multi-ue-proxy --tag proxy:' + tag + ' --file docker/Dockerfile.ubuntu18.04 . > cmake_targets/log/proxy-build.log 2>&1', '\$', 180)
# Note: at this point, OAI images are flattened, but we cannot do this
# here, as the flatten script is not in the proxy repo
mySSH.command(self.cli + ' image inspect --format=\'Size = {{.Size}} bytes\' proxy:' + tag, '\$', 5)
mySSH.command(self.cli + ' image prune --force || true','\$', 15)
if mySSH.getBefore().count('o such image') != 0:
logging.error('\u001B[1m Build of L2sim proxy failed\u001B[0m')
mySSH.close()
HTML.CreateHtmlTestRow('commit ' + tag, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
else:
logging.debug('L2sim proxy image for tag ' + tag + ' already exists, skipping build')
# retag the built image so that we pick it up later
mySSH.command('docker image tag proxy:' + tag + ' oai-lte-multi-ue-proxy:latest', '\$', 5)
# no merge: this is a push to develop, tag the image so we can push it to the registry
if not self.ranAllowMerge:
mySSH.command('docker image tag proxy:' + tag + ' proxy:develop', '\$', 5)
# we assume that the host on which this is built will also run the proxy. The proxy
# currently requires the following command, and the docker-compose up mechanism of
# the CI does not allow running arbitrary commands. Note that the following actually
# belongs to the deployment, not the build of the proxy...
logging.warning('the following command belongs to deployment, but no mechanism exists to exec it there!')
mySSH.command('sudo ifconfig lo: 127.0.0.2 netmask 255.0.0.0 up', '\$', 5)
# Analyzing the logs
if buildProxy:
self.testCase_id = HTML.testCase_id
mySSH.command('cd ' + lSourcePath + '/cmake_targets', '\$', 5)
mySSH.command('mkdir -p proxy_build_log_' + self.testCase_id, '\$', 5)
mySSH.command('mv log/* ' + 'proxy_build_log_' + self.testCase_id, '\$', 5)
if (os.path.isfile('./proxy_build_log_' + self.testCase_id + '.zip')):
os.remove('./proxy_build_log_' + self.testCase_id + '.zip')
if (os.path.isdir('./proxy_build_log_' + self.testCase_id)):
shutil.rmtree('./proxy_build_log_' + self.testCase_id)
mySSH.command('zip -r -qq proxy_build_log_' + self.testCase_id + '.zip proxy_build_log_' + self.testCase_id, '\$', 5)
mySSH.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/proxy_build_log_' + self.testCase_id + '.zip', '.')
# don't delete such that we might recover the zips
#mySSH.command('rm -f build_log_' + self.testCase_id + '.zip','\$', 5)
# we do not analyze the logs (we assume the proxy builds fine at this stage),
# but need to have the following information to correctly display the HTML
files = {}
errorandwarnings = {}
errorandwarnings['errors'] = 0
errorandwarnings['warnings'] = 0
errorandwarnings['status'] = True
files['Target Image Creation'] = errorandwarnings
collectInfo = {}
collectInfo['proxy'] = files
mySSH.command('docker image inspect --format=\'Size = {{.Size}} bytes\' proxy:' + tag, '\$', 5)
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', mySSH.getBefore())
allImagesSize = {}
if result is not None:
imageSize = float(result.group('size')) / 1000000
logging.debug('\u001B[1m proxy size is ' + ('%.0f' % imageSize) + ' Mbytes\u001B[0m')
allImagesSize['proxy'] = str(round(imageSize,1)) + ' Mbytes'
else:
logging.debug('proxy size is unknown')
allImagesSize['proxy'] = 'unknown'
# Cleaning any created tmp volume
mySSH.command(self.cli + ' volume prune --force || true','\$', 15)
mySSH.close()
logging.info('\u001B[1m Building L2sim Proxy Image Pass\u001B[0m')
HTML.CreateHtmlTestRow('commit ' + tag, 'OK', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, allImagesSize)
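# Push_Image_to_Local_Registry: tag the freshly built images with the CI tag and push
# them to the local registry (porcepix), then remove the local copies and log out.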
def Push_Image_to_Local_Registry(self, HTML):
if self.registrySvrId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.registrySvrId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.registrySvrId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('Pushing images from server: ' + lIpAddr)
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
imagePrefix = 'porcepix.sboai.cs.eurecom.fr'
mySSH.command(f'docker login -u oaicicd -p oaicicd {imagePrefix}', '\$', 5)
if re.search('Login Succeeded', mySSH.getBefore()) is None:
msg = 'Could not log into local registry'
logging.error(msg)
mySSH.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
orgTag = 'develop'
if self.ranAllowMerge:
orgTag = 'ci-temp'
imageNames = ['oai-enb', 'oai-gnb', 'oai-lte-ue', 'oai-nr-ue', 'oai-lte-ru', 'oai-nr-cuup']
for image in imageNames:
tagToUse = ImageTagToUse(image, self.ranCommitID, self.ranBranch, self.ranAllowMerge)
mySSH.command(f'docker image tag {image}:{orgTag} {imagePrefix}/{tagToUse}', '\$', 5)
mySSH.command(f'docker push {imagePrefix}/{tagToUse}', '\$', 120)
if re.search(': digest:', mySSH.getBefore()) is None:
logging.debug(mySSH.getBefore())
msg = f'Could not push {image} to local registry : {tagToUse}'
logging.error(msg)
mySSH.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
mySSH.command(f'docker rmi {imagePrefix}/{tagToUse} {image}:{orgTag}', '\$', 30)
mySSH.command(f'docker logout {imagePrefix}', '\$', 5)
if re.search('Removing login credentials', mySSH.getBefore()) is None:
msg = 'Could not log off from local registry'
logging.error(msg)
mySSH.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
mySSH.close()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
return True
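# Pull_Image_from_Local_Registry: on the test server (or locally when lIpAddr == 'none'),
# pull the images listed in self.imageToPull from the local registry and retag them
# under the oai-ci/ prefix used by the docker-compose files.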
def Pull_Image_from_Local_Registry(self, HTML):
# This method can be called either on a remote server (different from the python executor)
# or directly on the python executor (i.e. lIpAddr == 'none')
if self.testSvrId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.testSvrId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.testSvrId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('\u001B[1m Pulling image(s) on server: ' + lIpAddr + '\u001B[0m')
myCmd = cls_cmd.getConnection(lIpAddr)
imagePrefix = 'porcepix.sboai.cs.eurecom.fr'
response = myCmd.run(f'docker login -u oaicicd -p oaicicd {imagePrefix}')
if response.returncode != 0:
msg = 'Could not log into local registry'
logging.error(msg)
myCmd.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
for image in self.imageToPull:
tagToUse = ImageTagToUse(image, self.ranCommitID, self.ranBranch, self.ranAllowMerge)
cmd = f'docker pull {imagePrefix}/{tagToUse}'
response = myCmd.run(cmd, timeout=120)
if response.returncode != 0:
logging.debug(response)
msg = f'Could not pull {image} from local registry : {tagToUse}'
logging.error(msg)
myCmd.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
myCmd.run(f'docker tag {imagePrefix}/{tagToUse} oai-ci/{tagToUse}')
myCmd.run(f'docker rmi {imagePrefix}/{tagToUse}')
response = myCmd.run(f'docker logout {imagePrefix}')
if response.returncode != 0:
msg = 'Could not log off from local registry'
logging.error(msg)
myCmd.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
myCmd.close()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
return True
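# Clean_Test_Server_Images: remove the oai-ci/ tagged test images from the test server
# (or from the executor itself when lIpAddr == 'none').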
def Clean_Test_Server_Images(self, HTML):
# This method can be called either on a remote server (different from the python executor)
# or directly on the python executor (i.e. lIpAddr == 'none')
if self.testSvrId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.testSvrId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.testSvrId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
if lIpAddr != 'none':
logging.debug('Removing test images from server: ' + lIpAddr)
myCmd = cls_cmd.RemoteCmd(lIpAddr)
else:
logging.debug('Removing test images locally')
myCmd = cls_cmd.LocalCmd()
for image in IMAGES:
imageTag = ImageTagToUse(image, self.ranCommitID, self.ranBranch, self.ranAllowMerge)
cmd = f'docker rmi oai-ci/{imageTag}'
myCmd.run(cmd, reportNonZero=False)
myCmd.close()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
return True
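# DeployObject: deploy the OAI container(s) with docker-compose on the selected server,
# wait for the container to report a healthy status and for the expected sync messages
# in its logs, then report deployment success or failure in the HTML page.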
def DeployObject(self, HTML, EPC):
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('\u001B[1m Deploying OAI Object on server: ' + lIpAddr + '\u001B[0m')
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
CreateWorkspace(mySSH, lSourcePath, self.ranRepository, self.ranCommitID, self.ranTargetBranch, self.ranAllowMerge)
mySSH.command('cd ' + lSourcePath + '/' + self.yamlPath[self.eNB_instance], '\$', 5)
mySSH.command('cp docker-compose.y*ml ci-docker-compose.yml', '\$', 5)
for image in IMAGES:
imageTag = ImageTagToUse(image, self.ranCommitID, self.ranBranch, self.ranAllowMerge)
mySSH.command(f'sed -i -e "s#image: {image}:latest#image: oai-ci/{imageTag}#" ci-docker-compose.yml', '\$', 2)
# Currently we support only one service per eNB instance
svcName = self.services[self.eNB_instance]
if svcName == '':
logging.warning('no service name given: starting all services in ci-docker-compose.yml!')
mySSH.command(f'docker-compose --file ci-docker-compose.yml up -d -- {svcName}', '\$', 30)
# Checking Status
grep = ''
if svcName != '': grep = f' | grep -A3 {svcName}'
mySSH.command(f'docker-compose --file ci-docker-compose.yml config {grep}', '\$', 5)
result = re.search('container_name: (?P<container_name>[a-zA-Z0-9\-\_]+)', mySSH.getBefore())
unhealthyNb = 0
healthyNb = 0
startingNb = 0
containerName = ''
usedImage = ''
imageInfo = ''
if result is not None:
containerName = result.group('container_name')
time.sleep(5)
cnt = 0
while (cnt < 3):
mySSH.command('docker inspect --format="{{.State.Health.Status}}" ' + containerName, '\$', 5)
unhealthyNb = mySSH.getBefore().count('unhealthy')
healthyNb = mySSH.getBefore().count('healthy') - unhealthyNb
startingNb = mySSH.getBefore().count('starting')
if healthyNb == 1:
cnt = 10
else:
time.sleep(10)
cnt += 1
mySSH.command('docker inspect --format="ImageUsed: {{.Config.Image}}" ' + containerName, '\$', 5)
for stdoutLine in mySSH.getBefore().split('\n'):
if stdoutLine.count('ImageUsed: oai-ci'):
usedImage = stdoutLine.replace('ImageUsed: oai-ci', 'oai-ci').strip()
logging.debug('Used image is ' + usedImage)
if usedImage != '':
mySSH.command('docker image inspect --format "* Size = {{.Size}} bytes\n* Creation = {{.Created}}\n* Id = {{.Id}}" ' + usedImage, '\$', 5, silent=True)
for stdoutLine in mySSH.getBefore().split('\n'):
if re.search('Size = [0-9]', stdoutLine) is not None:
imageInfo += stdoutLine.strip() + '\n'
if re.search('Creation = [0-9]', stdoutLine) is not None:
imageInfo += stdoutLine.strip() + '\n'
if re.search('Id = sha256', stdoutLine) is not None:
imageInfo += stdoutLine.strip() + '\n'
logging.debug(' -- ' + str(healthyNb) + ' healthy container(s)')
logging.debug(' -- ' + str(unhealthyNb) + ' unhealthy container(s)')
logging.debug(' -- ' + str(startingNb) + ' still starting container(s)')
self.testCase_id = HTML.testCase_id
self.eNB_logFile[self.eNB_instance] = 'enb_' + self.testCase_id + '.log'
status = False
if healthyNb == 1:
cnt = 0
while (cnt < 20):
mySSH.command('docker logs ' + containerName + ' | egrep --text --color=never -i "wait|sync|Starting"', '\$', 30)
result = re.search('got sync|Starting E1AP at CU UP|Starting F1AP at CU|Got sync|Waiting for RUs to be configured', mySSH.getBefore())
if result is None:
time.sleep(6)
cnt += 1
else:
cnt = 100
status = True
logging.info('\u001B[1m Deploying OAI object Pass\u001B[0m')
time.sleep(10)
else:
# containers are unhealthy, so we won't start. However, logs are stored at the end
# in UndeployObject, so here we store the logs of the unhealthy container to report it
logfilename = f'{lSourcePath}/cmake_targets/log/{self.eNB_logFile[self.eNB_instance]}'
mySSH.command(f'docker logs {containerName} > {logfilename}', '\$', 30)
mySSH.copyin(lIpAddr, lUserName, lPassWord, logfilename, '.')
mySSH.close()
message = ''
if usedImage != '':
message += f'Used Image = {usedImage} :\n'
message += imageInfo
else:
message += 'Could not retrieve used image info!\n'
if status:
message += '\nHealthy deployment!\n'
else:
message += '\nUnhealthy deployment! -- Check logs for reason!\n'
if status:
HTML.CreateHtmlTestRowQueue('N/A', 'OK', [message])
else:
self.exitStatus = 1
HTML.CreateHtmlTestRowQueue('N/A', 'KO', [message])
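# UndeployObject: collect the logs of all running services of the docker-compose file,
# stop and remove the containers/volumes, then analyze the collected log files.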
def UndeployObject(self, HTML, RAN):
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('\u001B[1m Undeploying OAI Object from server: ' + lIpAddr + '\u001B[0m')
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
mySSH.command('cd ' + lSourcePath + '/' + self.yamlPath[self.eNB_instance], '\$', 5)
svcName = self.services[self.eNB_instance]
forceDown = False
if svcName != '':
logging.warning(f'service name given, but will stop all services in ci-docker-compose.yml!')
svcName = ''
mySSH.command(f'docker-compose -f ci-docker-compose.yml config --services', '\$', 5)
# first line has command, last line has next command prompt
allServices = mySSH.getBefore().split('\r\n')[1:-1]
services = []
for s in allServices:
mySSH.command(f'docker-compose -f ci-docker-compose.yml ps --all -- {s}', '\$', 5, silent=False)
running = mySSH.getBefore().split('\r\n')[2:-1]
logging.debug(f'running services: {running}')
if len(running) > 0: # something is running for that service
services.append(s)
logging.info(f'stopping services {services}')
mySSH.command(f'docker-compose -f ci-docker-compose.yml stop -t3', '\$', 30)
time.sleep(5) # give some time to running containers to stop
for svcName in services:
# head -n -1 suppresses the final "X exited with status code Y"
filename = f'{svcName}-{HTML.testCase_id}.log'
#mySSH.command(f'docker-compose -f ci-docker-compose.yml logs --no-log-prefix -- {svcName} | head -n -1 &> {lSourcePath}/cmake_targets/log/{filename}', '\$', 30)
mySSH.command(f'docker-compose -f ci-docker-compose.yml logs --no-log-prefix -- {svcName} &> {lSourcePath}/cmake_targets/log/{filename}', '\$', 120)
mySSH.command('docker-compose -f ci-docker-compose.yml down', '\$', 5)
# Cleaning any created tmp volume
mySSH.command('docker volume prune --force', '\$', 20)
mySSH.close()
# Analyzing log file!
files = ','.join([f'{s}-{HTML.testCase_id}' for s in services])
if len(services) > 1:
files = '{' + files + '}'