/* Copyright (C) 2006 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02111-1301, USA

   Library for providing TAP support for testing C and C++ was written
   by Mats Kindahl <mats@mysql.com>.
*/

#include "tap.h"

#include "ma_global.h"

#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>

/*
  Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
  We don't put this #define in config-win.h because we prefer
  ma_vsnprintf everywhere instead, except when linking with libmysys
  is not desirable - the case here.
*/
#if defined(_MSC_VER) && (_MSC_VER == 1310)
#define vsnprintf _vsnprintf
#endif

/**
   @defgroup MyTAP_Internal MyTAP Internals

   Internal functions and data structures for the MyTAP implementation.
*/

/**
   Test data structure.

   Data structure containing all information about the test suite.

   @ingroup MyTAP_Internal
*/
static TEST_DATA g_test = { 0, 0, 0, "" };

/**
   Output stream for test report messages.

   The macro is just a temporary solution.

   @ingroup MyTAP_Internal
*/
#define tapout stdout

/**
   Emit the beginning of a test line, that is: "(not) ok", test number,
   and description.

   To emit the directive, use the emit_dir() function.

   @ingroup MyTAP_Internal

   @see emit_dir

   @param pass 'true' if test passed, 'false' otherwise
   @param fmt  Description of test in printf() format.
   @param ap   Vararg list for the description string above.
*/
static void
vemit_tap(int pass, char const *fmt, va_list ap)
{
  fprintf(tapout, "%sok %d%s",
          pass ? "" : "not ",
          ++g_test.last,
          (fmt && *fmt) ? " - " : "");
  if (fmt && *fmt)
    vfprintf(tapout, fmt, ap);
}


/**
   Emit a TAP directive.

   TAP directives are comments that follow the test line and have the form:

   @code
   ok 1 # skip reason for skipping
   not ok 2 # todo some text explaining what remains
   @endcode

   @ingroup MyTAP_Internal

   @param dir Directive as a string
   @param why Explanation string
*/
static void
emit_dir(const char *dir, const char *why)
{
  fprintf(tapout, " # %s %s", dir, why);
}


/**
   Emit a newline to the TAP output stream.

   @ingroup MyTAP_Internal
*/
static void
emit_endl()
{
  fprintf(tapout, "\n");
}

static void
handle_core_signal(int signo)
{
  BAIL_OUT("Signal %d thrown", signo);
}

void
BAIL_OUT(char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(tapout, "Bail out! ");
  vfprintf(tapout, fmt, ap);
  emit_endl();
  va_end(ap);
  exit(255);
}


void
diag(char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(tapout, "# ");
  vfprintf(tapout, fmt, ap);
  emit_endl();
  va_end(ap);
}

typedef struct signal_entry {
  int signo;
  void (*handler)(int);
} signal_entry;

static signal_entry install_signal[]= {
#ifdef SIGQUIT
  { SIGQUIT, handle_core_signal },
#endif
  { SIGILL,  handle_core_signal },
  { SIGABRT, handle_core_signal },
  { SIGFPE,  handle_core_signal },
  { SIGSEGV, handle_core_signal }
#ifdef SIGBUS
  , { SIGBUS,  handle_core_signal }
#endif
#ifdef SIGXCPU
  , { SIGXCPU, handle_core_signal }
#endif
#ifdef SIGXFSZ
  , { SIGXFSZ, handle_core_signal }
#endif
#ifdef SIGSYS
  , { SIGSYS,  handle_core_signal }
#endif
#ifdef SIGTRAP
  , { SIGTRAP, handle_core_signal }
#endif
};

int skip_big_tests= 1;

void
plan(int const count)
{
  char *config= getenv("MYTAP_CONFIG");
  size_t i;

  if (config)
    skip_big_tests= strcmp(config, "big");

  setvbuf(tapout, 0, _IONBF, 0);  /* provide output at once */

  /*
    Install signal handlers
  */
  for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i)
    signal(install_signal[i].signo, install_signal[i].handler);

  g_test.plan= count;
  switch (count)
  {
  case NO_PLAN:
    break;
  default:
    if (count > 0)
      fprintf(tapout, "1..%d\n", count);
    break;
  }
}


void
skip_all(char const *reason, ...)
{
  va_list ap;
  va_start(ap, reason);
  fprintf(tapout, "1..0 # skip ");
  vfprintf(tapout, reason, ap);
  va_end(ap);
  exit(0);
}

void
ok(int const pass, char const *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);

  if (!pass && *g_test.todo == '\0')
    ++g_test.failed;

  vemit_tap(pass, fmt, ap);
  va_end(ap);
  if (*g_test.todo != '\0')
    emit_dir("todo", g_test.todo);
  emit_endl();
}


void
skip(int how_many, char const *const fmt, ...)
{
  char reason[80];
  if (fmt && *fmt)
  {
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(reason, sizeof(reason), fmt, ap);
    va_end(ap);
  }
  else
    reason[0] = '\0';

  while (how_many-- > 0)
  {
    va_list ap;
    memset((char*) &ap, 0, sizeof(ap));  /* Keep compiler happy */
    vemit_tap(1, NULL, ap);
    emit_dir("skip", reason);
    emit_endl();
  }
}

void
todo_start(char const *message, ...)
{
  va_list ap;
  va_start(ap, message);
  vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap);
  va_end(ap);
}

void
todo_end()
{
  *g_test.todo = '\0';
}

int exit_status() {
  /*
    If no plan was written up front, write one now (at the end) instead.
  */
  if (g_test.plan == NO_PLAN)
    plan(g_test.last);

  if (g_test.plan != g_test.last)
  {
    diag("%d tests planned but%s %d executed",
         g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
    return EXIT_FAILURE;
  }

  if (g_test.failed > 0)
  {
    diag("Failed %d tests!", g_test.failed);
    return EXIT_FAILURE;
  }

  return EXIT_SUCCESS;
}

/**
   @mainpage Testing C and C++ using MyTAP

   @section IntroSec Introduction

   Unit tests are used to test individual components of a system. In
   contrast, functional tests usually test the entire system. The
   rationale is that each component should be correct if the system is
   to be correct. Unit tests are usually small pieces of code that
   test an individual function, class, module, or other unit of the
   code.

   Observe that a correctly functioning system can be built from
   "faulty" components. The problem with this approach is that as the
   system evolves, the bugs surface in unexpected ways, making
   maintenance harder.

   The advantages of using unit tests to test components of the system
   are several:

   - The unit tests can test more thoroughly than the functional tests
     by checking correctness even for pathological use (which
     shouldn't be present in the system). This increases the overall
     robustness of the system and makes maintenance easier.

   - It is easier and faster to find problems with a malfunctioning
     component than to find problems in a malfunctioning system. This
     shortens the compile-run-edit cycle and therefore improves the
     overall pace of development.

   - The component has to support at least two uses: in the system and
     in a unit test. This leads to more generic and stable interfaces
     and in addition promotes the development of reusable components.

   For example, the following are typical functional tests:

   - Do transactions work according to specification?
   - Can we connect a client to the server and execute statements?

   In contrast, the following are typical unit tests:

   - Can the 'String' class handle a specified list of character sets?
   - Do all operations for 'my_bitmap' produce the correct result?
   - Do all the NIST test vectors for the AES implementation encrypt
     correctly?


   @section UnitTest Writing unit tests

   The purpose of writing unit tests is to use them to drive component
   development towards a solution that passes the tests. This means
   that the unit tests have to be as complete as possible, testing at
   least:

   - Normal input
   - Borderline cases
   - Faulty input
   - Error handling
   - Bad environment

   @subsection NormalSubSec Normal input

   This is to test that the component has the expected behaviour.
   This is plain and simple: test that it works. For example, test
   that you can unpack what you packed, that adding gives the sum, and
   that pinching the duck makes it quack.

   This is what everybody does when they write tests.

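   In MyTAP terms, such a test point could look like this (a minimal
   sketch; pack() and unpack() stand in for a hypothetical unit under
   test):

   @code
   char buf[16];
   pack(buf, sizeof(buf), 42);
   ok(unpack(buf, sizeof(buf)) == 42, "pack/unpack round-trip of 42");
   @endcode
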
   @subsection BorderlineTests Borderline cases

   If you have a size anywhere in your component, does it work for
   size 1? Size 0? Sizes close to <code>UINT_MAX</code>?

   It might not be sensible to have a size 0, so in this case it is
   not a borderline case, but rather a faulty input (see @ref
   FaultyInputTests).

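   A borderline test then exercises those extreme sizes explicitly,
   for instance (a sketch; struct bitmap and bitmap_init() are
   hypothetical, with a non-zero return value on failure):

   @code
   struct bitmap map;
   ok(bitmap_init(&map, 1) == 0,        "smallest sensible bitmap");
   ok(bitmap_init(&map, UINT_MAX) != 0, "absurdly large bitmap is refused");
   @endcode
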
   @subsection FaultyInputTests Faulty input

   Does your bitmap handle a size of 0 bits? Well, it might not be
   designed for it, but it should <em>not</em> crash the application;
   it should rather produce an error. This is called defensive
   programming.

   Unfortunately, adding checks for values that should just not be
   passed at all is not always practical: the checks cost cycles and
   might cost more than they're worth. For example, some functions are
   designed so that you may not give them a null pointer. In those
   cases it's not sensible to pass them <code>NULL</code> just to see
   them crash.

   Since every experienced programmer adds an <code>assert()</code> to
   ensure that you get a proper failure in the debug builds when a
   null pointer is passed (you add asserts too, right?), you will in
   this case instead have a controlled (early) crash in the debug
   build.

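   For the cases that should produce an error rather than crash, a
   faulty-input test point could look like this (a sketch; the
   hypothetical bitmap_init() is assumed to return non-zero on error):

   @code
   struct bitmap map;
   ok(bitmap_init(&map, 0) != 0, "0-bit bitmap is rejected with an error");
   @endcode
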
   @subsection ErrorHandlingTests Error handling

   This means testing that the errors your component is designed to
   give actually are produced. For example, testing that trying to
   open a non-existing file produces a sensible error code.

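   Such a test point could look like this (a sketch that relies only
   on the standard C library; it assumes errno.h and stdio.h are
   included):

   @code
   FILE *f= fopen("/no/such/file", "r");
   ok(f == NULL && errno == ENOENT, "missing file gives ENOENT");
   @endcode
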
   @subsection BadEnvironmentTests Environment

   Sometimes, modules have to behave well even when the environment
   fails to work correctly. Typical examples are when the computer is
   out of dynamic memory or when the disk is full. You can emulate
   this by replacing, e.g., <code>malloc()</code> with your own
   version that will work for a while, but then fail. A couple of
   things are worth keeping in mind here (see the sketch after this
   list):

   - Make sure that the function fails deterministically, so that you
     really can repeat the test.

   - Make sure that it doesn't just fail immediately. The unit might
     have checks for the first case, but might actually fail some time
     in the near future.

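   One way to arrange this is to route the unit's allocations through
   a wrapper that can be told to fail after a fixed number of calls
   (a sketch; it assumes the unit under test calls test_malloc()
   instead of malloc() directly):

   @code
   static int allocs_left= -1;        /* -1 means "never fail" */

   void *test_malloc(size_t size)
   {
     if (allocs_left == 0)
       return NULL;                   /* simulate out-of-memory */
     if (allocs_left > 0)
       --allocs_left;
     return malloc(size);
   }
   @endcode

   Setting allocs_left to, say, 3 before exercising the unit makes the
   fourth allocation fail, which is both deterministic and not an
   immediate failure.
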
   @section TestStructureSec How to structure a unit test

   In this section we will give some advice on how to structure the
   unit tests to make the development run smoothly. The basic
   structure of a test is:

   - Plan
   - Test
   - Report


   @subsection TestPlanning Plan the test

   Planning the test means stating how many test points there are. In
   the event that one of the tests causes a crash, it is then possible
   to see that there are fewer tests than expected, and print a proper
   error message.

   To plan a test, use the @c plan() function in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     .
     .
     .
   }
   @endcode

   If you don't call the @c plan() function, the number of tests
   executed will be printed at the end. This is intended to be used
   while you are developing the unit and constantly adding tests. It
   is not intended to be used after the unit has been released.

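   If you want to state that explicitly, you can pass @c NO_PLAN; the
   plan line is then written by exit_status() when the test program
   finishes. A sketch:

   @code
   int main(int argc, char *argv[])
   {
     plan(NO_PLAN);
     /* ... test points added as the unit grows ... */
     return exit_status();
   }
   @endcode
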
   @subsection TestRunning Execute the test

   To report the status of a test, the @c ok() function is used in the
   following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
     .
     .
     .
   }
   @endcode

   This will print a test result line on the standard output in TAP
   format, which allows TAP handling frameworks (like Test::Harness)
   to parse the status of the test.

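   For the call above, the emitted line would look something like the
   following (assuming the test passes and is the first test point):

   @code
   ok 1 - 0 ducks did not paddle
   @endcode
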
   @subsection TestReport Report the result of the test

   At the end, a complete test report should be written, with some
   statistics. If the test returns EXIT_SUCCESS, all tests were
   successful; otherwise at least one test failed.

   To get a TAP compliant output and exit status, report the exit
   status in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     plan(5);
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
     .
     .
     .
     return exit_status();
   }
   @endcode

   @section DontDoThis Ways to not do unit testing

   In this section, we'll go through some quite common ways to write
   tests that are <em>not</em> a good idea.

   @subsection BreadthFirstTests Doing breadth-first testing

   If you're writing a library with several functions, don't test all
   functions using size 1, then all functions using size 2, etc. If a
   test for size 42 fails, you have no easy way of tracking down why
   it failed.

   It is better to concentrate on getting one function to work at a
   time, which means that you test each function for all sizes that
   you think are reasonable. Then you continue with the next function,
   doing the same. This is usually also the way that a library is
   developed (one function at a time), so stick to testing that is
   appropriate for how far the unit has been developed.

   @subsection JustToBeSafeTest Writing unnecessarily large tests

   Don't write tests that use parameters in the range 1-1024 unless
   you have a very good reason to believe that the component will
   succeed for 562 but fail for 564 (the numbers picked are just
   examples).

   It is very common to write extensive tests "just to be safe."
   Having a test suite with a lot of values might give you a warm
   fuzzy feeling, but it doesn't really help you find the bugs. Good
   tests fail; seriously, if you write a test that you expect to
   succeed, you don't need to write it. If you think that it
   <em>might</em> fail, <em>then</em> you should write it.

   Don't take this as an excuse to avoid writing any tests at all
   "since I make no mistakes" (when it comes to this, there are two
   kinds of people: those who admit they make mistakes, and those who
   don't); rather, this means that there is no reason to test that a
   buffer with size 100 works when you have a test for buffer size 96.

   The drawback of such tests is that the test suite takes longer to
   run, for little or no benefit. It is acceptable to do an exhaustive
   test if it doesn't take too long to run, and it is quite common to
   do an exhaustive test of a function for a small set of values.
   Use your judgment to decide what is excessive: your mileage may
   vary.
*/

/**
   @example simple.t.c

   This is a simple example of how to write a test using the
   library. The output of this program is:

   @code
   1..1
   # Testing basic functions
   ok 1 - Testing gcs()
   @endcode

   The basic structure is: plan the number of test points using the
   plan() function, perform the tests and write out the result of each
   test point using the ok() function, print out a diagnostics message
   using diag(), and report the result of the test by calling the
   exit_status() function. Observe that this test does excessive
   testing (see @ref JustToBeSafeTest), but the test point doesn't
   take very long.
*/

/**
   @example todo.t.c

   This example demonstrates how to use the <code>todo_start()</code>
   and <code>todo_end()</code> functions to mark a sequence of tests
   still to be done. Observe that the tests are assumed to fail: if
   any test succeeds, it is considered a "bonus".

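   A typical use looks like this (a sketch; feature_x_works() is a
   hypothetical check for a feature that is not finished yet):

   @code
   todo_start("Feature X is not implemented yet");
   ok(feature_x_works(), "feature X");   /* reported with a todo directive */
   todo_end();
   @endcode
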
*/

/**
   @example skip.t.c

   This is an example of how the <code>SKIP_BLOCK_IF</code> macro can
   be used to skip a predetermined number of tests. Observe that the
   macro actually skips the following statement, but it's not sensible
   to use anything other than a block there.

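   A use of the macro could look roughly like this (a sketch; it
   assumes the macro takes the skip condition, the number of test
   points covered by the block, and a reason string, and that
   has_feature() is a hypothetical check):

   @code
   SKIP_BLOCK_IF(!has_feature(), 2, "feature not available")
   {
     ok(feature_step_one(), "first feature test");
     ok(feature_step_two(), "second feature test");
   }
   @endcode
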
*/

/**
   @example skip_all.t.c

   Sometimes, you skip an entire test because it's testing a feature
   that doesn't exist on the system that you're testing. To skip an
   entire test, use the <code>skip_all()</code> function according to
   this example.

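   In outline, that looks like this (a sketch; have_feature() is a
   hypothetical check, and skip_all() is called before any test points
   since it prints the "1..0" plan line and exits):

   @code
   int main(int argc, char *argv[])
   {
     if (!have_feature())
       skip_all("feature not available on this system");
     plan(2);
     /* ... test points ... */
     return exit_status();
   }
   @endcode
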
*/