aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBrenda J. Butler <bjb@mojatatu.com>2018-02-14 14:09:19 -0500
committerDavid S. Miller <davem@davemloft.net>2018-02-15 15:38:33 -0500
commitf87c7f646c6f704b29504d63c4ab965584c4aacb (patch)
tree679cdac7a53cf582ce75a4ade3791dc0a03804bd
parentc402fb7e380c142f2ad3e94d4a1a096fc55b643f (diff)
tools: tc-testing: Command line parms
Separate the functionality of the command line parameters into "selection" parameters, "action" parameters and other parameters. "Selection" parameters are for choosing which tests on which to act. "Action" parameters are for choosing what to do with the selected tests. "Other" parameters are for global effect (like "help" or "verbose").

With this commit, we add the ability to name a directory as another selection mechanism. We can accumulate a number of tests by directory, file, category, or even by test id, instead of being constrained to run all tests in one collection or just one test.

Signed-off-by: Brenda J. Butler <bjb@mojatatu.com>
Acked-by: Lucas Bates <lucasb@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt35
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py209
-rw-r--r--tools/testing/selftests/tc-testing/tdc_helper.py15
3 files changed, 164 insertions(+), 95 deletions(-)
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
index 00438331ba47..17b267dedbd9 100644
--- a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
+++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
@@ -12,14 +12,18 @@ template.json for the required JSON format for test cases.
12Include the 'id' field, but do not assign a value. Running tdc with the -i 12Include the 'id' field, but do not assign a value. Running tdc with the -i
13option will generate a unique ID for that test case. 13option will generate a unique ID for that test case.
14 14
15tdc will recursively search the 'tc' subdirectory for .json files. Any 15tdc will recursively search the 'tc-tests' subdirectory (or the
16test case files you create in these directories will automatically be included. 16directories named with the -D option) for .json files. Any test case
17If you wish to store your custom test cases elsewhere, be sure to run tdc 17files you create in these directories will automatically be included.
18with the -f argument and the path to your file. 18If you wish to store your custom test cases elsewhere, be sure to run
19tdc with the -f argument and the path to your file, or the -D argument
20and the path to your directory(ies).
19 21
20Be aware of required escape characters in the JSON data - particularly when 22Be aware of required escape characters in the JSON data - particularly
21defining the match pattern. Refer to the tctests.json file for examples when 23when defining the match pattern. Refer to the supplied json test files
22in doubt. 24for examples when in doubt. The match pattern is written in json, and
25will be used by python. So the match pattern will be a python regular
26expression, but should be written using json syntax.
23 27
24 28
25TEST CASE STRUCTURE 29TEST CASE STRUCTURE
@@ -69,7 +73,8 @@ SETUP/TEARDOWN ERRORS
69If an error is detected during the setup/teardown process, execution of the 73If an error is detected during the setup/teardown process, execution of the
70tests will immediately stop with an error message and the namespace in which 74tests will immediately stop with an error message and the namespace in which
71the tests are run will be destroyed. This is to prevent inaccurate results 75the tests are run will be destroyed. This is to prevent inaccurate results
72in the test cases. 76in the test cases. tdc will output a series of TAP results for the skipped
77tests.
73 78
74Repeated failures of the setup/teardown may indicate a problem with the test 79Repeated failures of the setup/teardown may indicate a problem with the test
75case, or possibly even a bug in one of the commands that are not being tested. 80case, or possibly even a bug in one of the commands that are not being tested.
@@ -79,3 +84,17 @@ so that it doesn't halt the script for an error that doesn't matter. Turn the
79individual command into a list, with the command being first, followed by all 84individual command into a list, with the command being first, followed by all
80acceptable exit codes for the command. 85acceptable exit codes for the command.
81 86
87Example:
88
89A pair of setup commands. The first can have exit code 0, 1 or 255, the
90second must have exit code 0.
91
92 "setup": [
93 [
94 "$TC actions flush action gact",
95 0,
96 1,
97 255
98 ],
99 "$TC actions add action reclassify index 65536"
100 ],
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index fc373fdf2bdc..ef3a8881e458 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -209,20 +209,41 @@ def set_args(parser):
209 """ 209 """
210 Set the command line arguments for tdc. 210 Set the command line arguments for tdc.
211 """ 211 """
212 parser.add_argument('-p', '--path', type=str, 212 parser.add_argument(
213 help='The full path to the tc executable to use') 213 '-p', '--path', type=str,
214 parser.add_argument('-c', '--category', type=str, nargs='?', const='+c', 214 help='The full path to the tc executable to use')
215 help='Run tests only from the specified category, or if no category is specified, list known categories.') 215 sg = parser.add_argument_group(
216 parser.add_argument('-f', '--file', type=str, 216 'selection', 'select which test cases: ' +
217 help='Run tests from the specified file') 217 'files plus directories; filtered by categories plus testids')
218 parser.add_argument('-l', '--list', type=str, nargs='?', const="++", metavar='CATEGORY', 218 ag = parser.add_argument_group(
219 help='List all test cases, or those only within the specified category') 219 'action', 'select action to perform on selected test cases')
220 parser.add_argument('-s', '--show', type=str, nargs=1, metavar='ID', dest='showID', 220
221 help='Display the test case with specified id') 221 sg.add_argument(
222 parser.add_argument('-e', '--execute', type=str, nargs=1, metavar='ID', 222 '-D', '--directory', nargs='+', metavar='DIR',
223 help='Execute the single test case with specified ID') 223 help='Collect tests from the specified directory(ies) ' +
224 parser.add_argument('-i', '--id', action='store_true', dest='gen_id', 224 '(default [tc-tests])')
225 help='Generate ID numbers for new test cases') 225 sg.add_argument(
226 '-f', '--file', nargs='+', metavar='FILE',
227 help='Run tests from the specified file(s)')
228 sg.add_argument(
229 '-c', '--category', nargs='*', metavar='CATG', default=['+c'],
230 help='Run tests only from the specified category/ies, ' +
231 'or if no category/ies is/are specified, list known categories.')
232 sg.add_argument(
233 '-e', '--execute', nargs='+', metavar='ID',
234 help='Execute the specified test cases with specified IDs')
235 ag.add_argument(
236 '-l', '--list', action='store_true',
237 help='List all test cases, or those only within the specified category')
238 ag.add_argument(
239 '-s', '--show', action='store_true', dest='showID',
240 help='Display the selected test cases')
241 ag.add_argument(
242 '-i', '--id', action='store_true', dest='gen_id',
243 help='Generate ID numbers for new test cases')
244 parser.add_argument(
245 '-v', '--verbose', action='count', default=0,
246 help='Show the commands that are being run')
226 parser.add_argument('-d', '--device', 247 parser.add_argument('-d', '--device',
227 help='Execute the test case in flower category') 248 help='Execute the test case in flower category')
228 return parser 249 return parser
@@ -257,7 +278,16 @@ def check_case_id(alltests):
257 Check for duplicate test case IDs. 278 Check for duplicate test case IDs.
258 """ 279 """
259 idl = get_id_list(alltests) 280 idl = get_id_list(alltests)
281 # print('check_case_id: idl is {}'.format(idl))
282 # answer = list()
283 # for x in idl:
284 # print('Looking at {}'.format(x))
285 # print('what the heck is idl.count(x)??? {}'.format(idl.count(x)))
286 # if idl.count(x) > 1:
287 # answer.append(x)
288 # print(' ... append it {}'.format(x))
260 return [x for x in idl if idl.count(x) > 1] 289 return [x for x in idl if idl.count(x) > 1]
290 return answer
261 291
262 292
263def does_id_exist(alltests, newid): 293def does_id_exist(alltests, newid):
@@ -300,28 +330,96 @@ def generate_case_ids(alltests):
300 json.dump(testlist, outfile, indent=4) 330 json.dump(testlist, outfile, indent=4)
301 outfile.close() 331 outfile.close()
302 332
333def filter_tests_by_id(args, testlist):
334 '''
335 Remove tests from testlist that are not in the named id list.
336 If id list is empty, return empty list.
337 '''
338 newlist = list()
339 if testlist and args.execute:
340 target_ids = args.execute
341
342 if isinstance(target_ids, list) and (len(target_ids) > 0):
343 newlist = list(filter(lambda x: x['id'] in target_ids, testlist))
344 return newlist
345
346def filter_tests_by_category(args, testlist):
347 '''
348 Remove tests from testlist that are not in a named category.
349 '''
350 answer = list()
351 if args.category and testlist:
352 test_ids = list()
353 for catg in set(args.category):
354 if catg == '+c':
355 continue
356 print('considering category {}'.format(catg))
357 for tc in testlist:
358 if catg in tc['category'] and tc['id'] not in test_ids:
359 answer.append(tc)
360 test_ids.append(tc['id'])
361
362 return answer
303 363
304def get_test_cases(args): 364def get_test_cases(args):
305 """ 365 """
306 If a test case file is specified, retrieve tests from that file. 366 If a test case file is specified, retrieve tests from that file.
307 Otherwise, glob for all json files in subdirectories and load from 367 Otherwise, glob for all json files in subdirectories and load from
308 each one. 368 each one.
369 Also, if requested, filter by category, and add tests matching
370 certain ids.
309 """ 371 """
310 import fnmatch 372 import fnmatch
311 if args.file != None: 373
312 if not os.path.isfile(args.file): 374 flist = []
313 print("The specified test case file " + args.file + " does not exist.") 375 testdirs = ['tc-tests']
314 exit(1) 376
315 flist = [args.file] 377 if args.file:
316 else: 378 # at least one file was specified - remove the default directory
317 flist = [] 379 testdirs = []
318 for root, dirnames, filenames in os.walk('tc-tests'): 380
381 for ff in args.file:
382 if not os.path.isfile(ff):
383 print("IGNORING file " + ff + " \n\tBECAUSE does not exist.")
384 else:
385 flist.append(os.path.abspath(ff))
386
387 if args.directory:
388 testdirs = args.directory
389
390 for testdir in testdirs:
391 for root, dirnames, filenames in os.walk(testdir):
319 for filename in fnmatch.filter(filenames, '*.json'): 392 for filename in fnmatch.filter(filenames, '*.json'):
320 flist.append(os.path.join(root, filename)) 393 candidate = os.path.abspath(os.path.join(root, filename))
321 alltests = list() 394 if candidate not in testdirs:
395 flist.append(candidate)
396
397 alltestcases = list()
322 for casefile in flist: 398 for casefile in flist:
323 alltests = alltests + (load_from_file(casefile)) 399 alltestcases = alltestcases + (load_from_file(casefile))
324 return alltests 400
401 allcatlist = get_test_categories(alltestcases)
402 allidlist = get_id_list(alltestcases)
403
404 testcases_by_cats = get_categorized_testlist(alltestcases, allcatlist)
405 idtestcases = filter_tests_by_id(args, alltestcases)
406 cattestcases = filter_tests_by_category(args, alltestcases)
407
408 cat_ids = [x['id'] for x in cattestcases]
409 if args.execute:
410 if args.category:
411 alltestcases = cattestcases + [x for x in idtestcases if x['id'] not in cat_ids]
412 else:
413 alltestcases = idtestcases
414 else:
415 if cat_ids:
416 alltestcases = cattestcases
417 else:
418 # just accept the existing value of alltestcases,
419 # which has been filtered by file/directory
420 pass
421
422 return allcatlist, allidlist, testcases_by_cats, alltestcases
325 423
326 424
327def set_operation_mode(args): 425def set_operation_mode(args):
@@ -330,10 +428,9 @@ def set_operation_mode(args):
330 what the script should do for this run, and call the appropriate 428 what the script should do for this run, and call the appropriate
331 function. 429 function.
332 """ 430 """
333 alltests = get_test_cases(args) 431 ucat, idlist, testcases, alltests = get_test_cases(args)
334 432
335 if args.gen_id: 433 if args.gen_id:
336 idlist = get_id_list(alltests)
337 if (has_blank_ids(idlist)): 434 if (has_blank_ids(idlist)):
338 alltests = generate_case_ids(alltests) 435 alltests = generate_case_ids(alltests)
339 else: 436 else:
@@ -347,42 +444,20 @@ def set_operation_mode(args):
347 print("Please correct them before continuing.") 444 print("Please correct them before continuing.")
348 exit(1) 445 exit(1)
349 446
350 ucat = get_test_categories(alltests)
351
352 if args.showID: 447 if args.showID:
353 show_test_case_by_id(alltests, args.showID[0]) 448 for atest in alltests:
449 print_test_case(atest)
354 exit(0) 450 exit(0)
355 451
356 if args.execute: 452 if isinstance(args.category, list) and (len(args.category) == 0):
357 target_id = args.execute[0] 453 print("Available categories:")
358 else: 454 print_sll(ucat)
359 target_id = "" 455 exit(0)
360
361 if args.category:
362 if (args.category == '+c'):
363 print("Available categories:")
364 print_sll(ucat)
365 exit(0)
366 else:
367 target_category = args.category
368 else:
369 target_category = ""
370
371
372 testcases = get_categorized_testlist(alltests, ucat)
373 456
374 if args.list: 457 if args.list:
375 if (args.list == "++"): 458 if args.list:
376 list_test_cases(alltests) 459 list_test_cases(alltests)
377 exit(0) 460 exit(0)
378 elif(len(args.list) > 0):
379 if (args.list not in ucat):
380 print("Unknown category " + args.list)
381 print("Available categories:")
382 print_sll(ucat)
383 exit(1)
384 list_test_cases(testcases[args.list])
385 exit(0)
386 461
387 if (os.geteuid() != 0): 462 if (os.geteuid() != 0):
388 print("This script must be run with root privileges.\n") 463 print("This script must be run with root privileges.\n")
@@ -390,24 +465,8 @@ def set_operation_mode(args):
390 465
391 ns_create() 466 ns_create()
392 467
393 if (len(target_category) == 0): 468 catresults = test_runner(alltests, args)
394 if (len(target_id) > 0): 469 print('All test results: \n\n{}'.format(catresults))
395 alltests = list(filter(lambda x: target_id in x['id'], alltests))
396 if (len(alltests) == 0):
397 print("Cannot find a test case with ID matching " + target_id)
398 exit(1)
399 catresults = test_runner(alltests, args)
400 print("All test results: " + "\n\n" + catresults)
401 elif (len(target_category) > 0):
402 if (target_category == "flower") and args.device == None:
403 print("Please specify a NIC device (-d) to run category flower")
404 exit(1)
405 if (target_category not in ucat):
406 print("Specified category is not present in this file.")
407 exit(1)
408 else:
409 catresults = test_runner(testcases[target_category], args)
410 print("Category " + target_category + "\n\n" + catresults)
411 470
412 ns_destroy() 471 ns_destroy()
413 472
diff --git a/tools/testing/selftests/tc-testing/tdc_helper.py b/tools/testing/selftests/tc-testing/tdc_helper.py
index db381120a566..9f35c96c88a0 100644
--- a/tools/testing/selftests/tc-testing/tdc_helper.py
+++ b/tools/testing/selftests/tc-testing/tdc_helper.py
@@ -57,20 +57,11 @@ def print_sll(items):
57 57
58def print_test_case(tcase): 58def print_test_case(tcase):
59 """ Pretty-printing of a given test case. """ 59 """ Pretty-printing of a given test case. """
60 print('\n==============\nTest {}\t{}\n'.format(tcase['id'], tcase['name']))
60 for k in tcase.keys(): 61 for k in tcase.keys():
61 if (isinstance(tcase[k], list)): 62 if (isinstance(tcase[k], list)):
62 print(k + ":") 63 print(k + ":")
63 print_list(tcase[k]) 64 print_list(tcase[k])
64 else: 65 else:
65 print(k + ": " + tcase[k]) 66 if not ((k == 'id') or (k == 'name')):
66 67 print(k + ": " + str(tcase[k]))
67
68def show_test_case_by_id(testlist, caseID):
69 """ Find the specified test case to pretty-print. """
70 if not any(d.get('id', None) == caseID for d in testlist):
71 print("That ID does not exist.")
72 exit(1)
73 else:
74 print_test_case(next((d for d in testlist if d['id'] == caseID)))
75
76