/* Copyright (c) 2006, 2010, Oracle and/or its affiliates.
   Copyright (c) 2011, Monty Program Ab

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

   Library for providing TAP support for testing C and C++ was written
   by Mats Kindahl <mats@mysql.com>.
*/

#include "tap.h"

#include "my_global.h"

#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>

static ulong start_timer(void);
static void end_timer(ulong start_time, char *buff);
static void nice_time(double sec, char *buff, my_bool part_second);

/*
  Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
  We don't put this #define elsewhere because we prefer my_vsnprintf
  everywhere instead, except when linking with libmysys is not
  desirable - the case here.
*/
#if defined(_MSC_VER) && (_MSC_VER == 1310)
#define vsnprintf _vsnprintf
#endif

/**
   @defgroup MyTAP_Internal MyTAP Internals

   Internal functions and data structures for the MyTAP implementation.
*/

/**
   Test data structure.

   Data structure containing all information about the test suite.

   @ingroup MyTAP_Internal
 */
static TEST_DATA g_test = { NO_PLAN, 0, 0, "" };

/**
   Output stream for test report messages.

   The macro is just a temporary solution.

   @ingroup MyTAP_Internal
 */
#define tapout stdout

/**
   Emit the beginning of a test line, that is: "(not) ok", test number,
   and description.

   To emit the directive, use the emit_dir() function.

   @ingroup MyTAP_Internal

   @see emit_dir

   @param pass  'true' if the test passed, 'false' otherwise
   @param fmt   Description of the test in printf() format
   @param ap    Vararg list for the description string above
 */
static void
vemit_tap(int pass, char const *fmt, va_list ap)
{
  fprintf(tapout, "%sok %d%s",
          pass ? "" : "not ",
          ++g_test.last,
          (fmt && *fmt) ? " - " : "");
  if (fmt && *fmt)
    vfprintf(tapout, fmt, ap);
  fflush(tapout);
}


/**
   Emit a TAP directive.

   TAP directives are comments appended to a test line, of the form:

   @code
   ok 1 # skip reason for skipping
   not ok 2 # todo some text explaining what remains
   @endcode

   @ingroup MyTAP_Internal

   @param dir  Directive as a string
   @param why  Explanation string
 */
static void
emit_dir(const char *dir, const char *why)
{
  fprintf(tapout, " # %s %s", dir, why);
  fflush(tapout);
}


/**
   Emit a newline to the TAP output stream.

   @ingroup MyTAP_Internal
 */
static void
emit_endl(void)
{
  fprintf(tapout, "\n");
  fflush(tapout);
}

static void
handle_core_signal(int signo)
{
  BAIL_OUT("Signal %d caught\n", signo);
}

void
BAIL_OUT(char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(tapout, "Bail out! ");
  vfprintf(tapout, fmt, ap);
  diag("%d tests planned, %d failed, last test executed was %d",
       g_test.plan, g_test.failed, g_test.last);
  emit_endl();
  va_end(ap);
  exit(255);
}


void
diag(char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(tapout, "# ");
  vfprintf(tapout, fmt, ap);
  emit_endl();
  va_end(ap);
}

typedef struct signal_entry {
  int signo;
  void (*handler)(int);
} signal_entry;

static signal_entry install_signal[]= {
  { SIGINT,  handle_core_signal },
  { SIGQUIT, handle_core_signal },
  { SIGILL,  handle_core_signal },
  { SIGABRT, handle_core_signal },
  { SIGFPE,  handle_core_signal },
  { SIGSEGV, handle_core_signal }
#ifdef SIGBUS
  , { SIGBUS,  handle_core_signal }
#endif
#ifdef SIGXCPU
  , { SIGXCPU, handle_core_signal }
#endif
#ifdef SIGXFSZ
  , { SIGXFSZ, handle_core_signal }
#endif
#ifdef SIGSYS
  , { SIGSYS,  handle_core_signal }
#endif
#ifdef SIGTRAP
  , { SIGTRAP, handle_core_signal }
#endif
};

int skip_big_tests= 1;
ulong start_time= 0;

void
plan(int count)
{
  char *config= getenv("MYTAP_CONFIG");
  size_t i;

  start_time= start_timer();

  if (config)
    skip_big_tests= strcmp(config, "big");

  setvbuf(tapout, 0, _IONBF, 0);  /* provide output at once */

  /*
    Install signal handlers
  */
  for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i)
    signal(install_signal[i].signo, install_signal[i].handler);

  g_test.plan= count;
  switch (count)
  {
  case NO_PLAN:
    break;
  default:
    if (count > 0)
    {
      fprintf(tapout, "1..%d\n", count);
      fflush(tapout);
    }
    break;
  }
}


void
skip_all(char const *reason, ...)
{
  va_list ap;
  va_start(ap, reason);
  fprintf(tapout, "1..0 # skip ");
  vfprintf(tapout, reason, ap);
  fflush(tapout);
  va_end(ap);
  exit(0);
}

void
ok(int pass, char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);

  if (!pass && *g_test.todo == '\0')
    ++g_test.failed;

  vemit_tap(pass, fmt, ap);
  va_end(ap);
  if (*g_test.todo != '\0')
    emit_dir("todo", g_test.todo);
  emit_endl();
}

void
ok1(int const pass)
{
  va_list ap;

  memset(&ap, 0, sizeof(ap));  /* Keep compiler happy; ap is unused when fmt is NULL */

  if (!pass && *g_test.todo == '\0')
    ++g_test.failed;

  vemit_tap(pass, NULL, ap);

  if (*g_test.todo != '\0')
    emit_dir("todo", g_test.todo);

  emit_endl();
}

void
skip(int how_many, char const * const fmt, ...)
{
  char reason[80];
  if (fmt && *fmt)
  {
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(reason, sizeof(reason), fmt, ap);
    va_end(ap);
  }
  else
    reason[0]= '\0';

  while (how_many-- > 0)
  {
    va_list ap;
    memset((char*) &ap, 0, sizeof(ap));  /* Keep compiler happy */
    vemit_tap(1, NULL, ap);
    emit_dir("skip", reason);
    emit_endl();
  }
}
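
/*
  Typical use of skip() (a sketch only; 'have_feature', 'test_one' and
  'test_two' are hypothetical): when a precondition fails, emit the
  block's test points as skipped so the plan count still adds up.

    if (!have_feature)
      skip(2, "feature not compiled in");
    else
    {
      ok(test_one(), "first check");
      ok(test_two(), "second check");
    }
*/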

void
todo_start(char const *message, ...)
{
  va_list ap;
  va_start(ap, message);
  vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap);
  va_end(ap);
}

void
todo_end()
{
  *g_test.todo= '\0';
}

int exit_status()
{
  char buff[60];

  /*
    If there was no plan, write one now, using the number of tests
    actually executed.
  */
  if (g_test.plan == NO_PLAN)
    plan(g_test.last);

  if (g_test.plan != g_test.last)
  {
    diag("%d tests planned but%s %d executed",
         g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
    return EXIT_FAILURE;
  }

  if (g_test.failed > 0)
  {
    diag("Failed %d tests!", g_test.failed);
    return EXIT_FAILURE;
  }

  if (start_time)
  {
    end_timer(start_time, buff);
    printf("Test took %s\n", buff);
    fflush(stdout);
  }

  return EXIT_SUCCESS;
}

#if defined(__WIN__) || defined(__NETWARE__)
#include <time.h>
#else
#include <sys/times.h>
#ifdef _SC_CLK_TCK  /* For mit-pthreads */
#undef CLOCKS_PER_SEC
#define CLOCKS_PER_SEC (sysconf(_SC_CLK_TCK))
#endif
#endif

static ulong start_timer(void)
{
#if defined(__WIN__) || defined(__NETWARE__)
  return clock();
#else
  struct tms tms_tmp;
  return times(&tms_tmp);
#endif
}


/**
  Write as many as 52+1 bytes to buff, in the form of a legible
  duration of time.

  len("4294967296 days, 23 hours, 59 minutes, 60.00 seconds") -> 52
*/

static void nice_time(double sec, char *buff, my_bool part_second)
{
  ulong tmp;
  if (sec >= 3600.0*24)
  {
    tmp= (ulong) (sec/(3600.0*24));
    sec-= 3600.0*24*tmp;
    buff+= sprintf(buff, "%ld %s", tmp, tmp > 1 ? "days " : "day ");
  }
  if (sec >= 3600.0)
  {
    tmp= (ulong) (sec/3600.0);
    sec-= 3600.0*tmp;
    buff+= sprintf(buff, "%ld %s", tmp, tmp > 1 ? "hours " : "hour ");
  }
  if (sec >= 60.0)
  {
    tmp= (ulong) (sec/60.0);
    sec-= 60.0*tmp;
    buff+= sprintf(buff, "%ld min ", tmp);
  }
  if (part_second)
    sprintf(buff, "%.2f sec", sec);
  else
    sprintf(buff, "%d sec", (int) sec);
}


static void end_timer(ulong start_time, char *buff)
{
  nice_time((double) (start_timer() - start_time) / CLOCKS_PER_SEC,
            buff, 1);
}


/**
   @mainpage Testing C and C++ using MyTAP

   @section IntroSec Introduction

   Unit tests are used to test individual components of a system.  In
   contrast, functional tests usually test the entire system.  The
   rationale is that each component should be correct if the system is
   to be correct.  Unit tests are usually small pieces of code that
   test an individual function, class, module, or other unit of the
   code.

   Observe that a correctly functioning system can be built from
   "faulty" components.  The problem with this approach is that as the
   system evolves, the bugs surface in unexpected ways, making
   maintenance harder.

   Using unit tests to test components of the system has several
   advantages:

   - Unit tests can test more thoroughly than functional tests by
     checking correctness even for pathological uses (which shouldn't
     occur in the system).  This increases the overall robustness of
     the system and makes maintenance easier.

   - It is easier and faster to find problems in a malfunctioning
     component than in a malfunctioning system.  This shortens the
     compile-run-edit cycle and therefore improves the overall
     performance of development.

   - The component has to support at least two uses: in the system and
     in a unit test.  This leads to more generic and stable interfaces
     and in addition promotes the development of reusable components.

   For example, the following are typical functional tests:
   - Do transactions work according to specifications?
   - Can we connect a client to the server and execute statements?

   In contrast, the following are typical unit tests:

   - Can the 'String' class handle a specified list of character sets?
   - Do all operations for 'my_bitmap' produce the correct result?
   - Do all the NIST test vectors for the AES implementation encrypt
     correctly?


   @section UnitTest Writing unit tests

   The purpose of writing unit tests is to use them to drive component
   development towards a solution that passes the tests.  This means
   that the unit tests have to be as complete as possible, testing at
   least:

   - Normal input
   - Borderline cases
   - Faulty input
   - Error handling
   - Bad environment

   @subsection NormalSubSec Normal input

   This is to test that the component has the expected behaviour.
   This is just plain simple: test that it works.  For example, test
   that you can unpack what you packed, adding gives the sum, pinching
   the duck makes it quack.

   This is what everybody does when they write tests.

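   As a minimal sketch (the @c add() function here is hypothetical,
   not part of MyTAP):

   @code
   ok(add(1, 2) == 3, "add() returns the sum");
   @endcode
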

   @subsection BorderlineTests Borderline cases

   If you have a size anywhere for your component, does it work for
   size 1?  Size 0?  Sizes close to <code>UINT_MAX</code>?

   It might not be sensible to have a size 0, so in this case it is
   not a borderline case, but rather a faulty input (see @ref
   FaultyInputTests).

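   For example, something like the following (where @c ring_create()
   is a hypothetical constructor taking a size):

   @code
   ok(ring_create(1) != NULL, "size 1 works");
   ok(ring_create(UINT_MAX - 1) != NULL, "size close to UINT_MAX works");
   @endcode
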
   @subsection FaultyInputTests Faulty input

   Does your bitmap handle a size of 0 bits?  Well, it might not be
   designed for it, but it should <em>not</em> crash the application;
   it should produce an error instead.  This is called defensive
   programming.

   Unfortunately, adding checks for values that should just not be
   entered at all is not always practical: the checks cost cycles and
   might cost more than they're worth.  For example, some functions
   are designed so that you may not give them a null pointer.  In
   those cases it's not sensible to pass them <code>NULL</code> just
   to see them crash.

   Since every experienced programmer adds an <code>assert()</code>
   to ensure that you get a proper failure for debug builds when a
   null pointer is passed (you add asserts too, right?), you will in
   this case instead have a controlled (early) crash in the debug
   build.


   @subsection ErrorHandlingTests Error handling

   This is testing that the errors your component is designed to give
   are actually produced.  For example, testing that trying to open a
   non-existing file produces a sensible error code.

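   A minimal sketch of such a test point, using only the standard
   library:

   @code
   FILE *f= fopen("file_that_does_not_exist", "r");
   ok(f == NULL, "open of missing file fails");
   @endcode
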
   @subsection BadEnvironmentTests Bad environment

   Sometimes, modules have to behave well even when the environment
   fails to work correctly.  Typical examples are when the computer is
   out of dynamic memory or when the disk is full.  You can emulate
   this by replacing, e.g., <code>malloc()</code> with your own
   version that will work for a while, but then fail (see the sketch
   after this list).  Some things are worth keeping in mind here:

   - Make sure the function fails deterministically, so that you
     really can repeat the test.

   - Make sure that it doesn't just fail immediately.  The unit might
     have checks for the first case, but might actually fail some time
     in the near future.

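   A sketch of such a wrapper (<code>test_malloc()</code> is
   hypothetical; the unit under test would have to be compiled to
   call it instead of <code>malloc()</code>):

   @code
   #include <stdlib.h>

   static int allocations_left= 4;

   void *test_malloc(size_t size)
   {
     if (allocations_left-- <= 0)
       return NULL;              /* deterministic failure */
     return malloc(size);
   }
   @endcode
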
   @section UnitTestStructure How to structure a unit test

   In this section we will give some advice on how to structure the
   unit tests to make the development run smoothly.  The basic
   structure of a test is:

   - Plan
   - Test
   - Report


   @subsection TestPlanning Plan the test

   Planning the test means telling how many tests there are.  In the
   event that one of the tests causes a crash, it is then possible to
   see that there are fewer tests than expected, and print a proper
   error message.

   To plan a test, use the @c plan() function in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     .
     .
     .
   }
   @endcode

   If you don't call the @c plan() function, the number of tests
   executed will be printed at the end.  This is intended to be used
   while developing the unit, when you are constantly adding tests.
   It is not intended to be used after the unit has been released.


   @subsection TestRunning Execute the test

   To report the status of a test, the @c ok() function is used in the
   following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
     .
     .
     .
   }
   @endcode

   This will print a test result line on the standard output in TAP
   format, which allows TAP handling frameworks (like Test::Harness)
   to parse the status of the test.

   @subsection TestReport Report the result of the test

   At the end, a complete test report should be written, with some
   statistics.  If the test returns EXIT_SUCCESS, all tests were
   successful; otherwise, at least one test failed.

   To get a TAP compliant output and exit status, report the exit
   status in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
     .
     .
     .
     return exit_status();
   }
   @endcode

   @section DontDoThis Ways not to do unit testing

   In this section, we'll go through some quite common ways to write
   tests that are <em>not</em> a good idea.

   @subsection BreadthFirstTests Doing breadth-first testing

   If you're writing a library with several functions, don't test all
   functions using size 1, then all functions using size 2, etc.  If a
   test for size 42 fails, you have no easy way of tracking down why
   it failed.

   It is better to concentrate on getting one function to work at a
   time, which means that you test each function for all sizes that
   you think are reasonable.  Then you continue with the next
   function, doing the same.  This is usually also the way that a
   library is developed (one function at a time), so stick to testing
   that is appropriate for how the unit is developed.

   @subsection JustToBeSafeTest Writing unnecessarily large tests

   Don't write tests that use parameters in the range 1-1024 unless
   you have a very good reason to believe that the component will
   succeed for 562 but fail for 564 (the numbers picked are just
   examples).

   It is very common to write extensive tests "just to be safe."
   Having a test suite with a lot of values might give you a warm
   fuzzy feeling, but it doesn't really help you find the bugs.  Good
   tests fail; seriously, if you write a test that you expect to
   succeed, you don't need to write it.  If you think that it
   <em>might</em> fail, <em>then</em> you should write it.

   Don't take this as an excuse to avoid writing any tests at all
   "since I make no mistakes" (when it comes to this, there are two
   kinds of people: those who admit they make mistakes, and those who
   don't); rather, this means that there is no reason to test that
   using a buffer with size 100 works when you have a test for buffer
   size 96.

   The drawback is that the test suite takes longer to run, for little
   or no benefit.  It is acceptable to do an exhaustive test if it
   doesn't take too long to run, and it is quite common to do an
   exhaustive test of a function for a small set of values.  Use your
   judgment to decide what is excessive: your mileage may vary.
*/

/**
   @example simple.t.c

   This is a simple example of how to write a test using the library.
   The output of this program is:

   @code
   1..1
   # Testing basic functions
   ok 1 - Testing gcs()
   @endcode

   The basic structure is: plan the number of test points using the
   plan() function, perform the test and write out the result of each
   test point using the ok() function, print out a diagnostics message
   using diag(), and report the result of the test by calling the
   exit_status() function.  Observe that this test does excessive
   testing (see @ref JustToBeSafeTest), but the test point doesn't
   take very long.
*/

/**
   @example todo.t.c

   This example demonstrates how to use the <code>todo_start()</code>
   and <code>todo_end()</code> functions to mark a sequence of tests
   that remain to be done.  Observe that the tests are assumed to
   fail: if any test succeeds, it is considered a "bonus".
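
   The pattern is a sketch like the following (the predicate
   <code>quacks_loudly()</code> is hypothetical):

   @code
   todo_start("Bug not fixed yet");
   ok(quacks_loudly(), NULL);
   todo_end();
   @endcode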
*/

/**
   @example skip.t.c

   This is an example of how the <code>SKIP_BLOCK_IF</code> macro can
   be used to skip a predetermined number of tests.  Observe that the
   macro actually skips the following statement, but it's not sensible
   to use anything other than a block.
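
   A sketch of the shape (assuming the macro takes a condition, a
   count, and a reason, as declared in tap.h; the condition and test
   calls are hypothetical):

   @code
   SKIP_BLOCK_IF(!ducks_available, 2, "No ducks in the pond")
   {
     ok(duck_quacks(), "quack");
     ok(duck_paddles(), "paddle");
   }
   @endcode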
*/

/**
   @example skip_all.t.c

   Sometimes, you skip an entire test because it's testing a feature
   that doesn't exist on the system that you're testing.  To skip an
   entire test, use the <code>skip_all()</code> function according to
   this example.
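
   For instance (where <code>have_feature_x()</code> is a hypothetical
   probe for the feature):

   @code
   if (!have_feature_x())
     skip_all("Feature X not supported on this system");
   plan(3);
   @endcode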
 */