tests: latency_measure: Increase verbosity of summary lines

Feedback from the previous summary line change has been that it
had become more difficult for people to parse and understand what
is being tested/benchmarked when the description consisted of
just using a terse tag. To improve the situation a more human
oriented description has been added to follow that tag so that
not only can tools still parse on the tag, but people can use
both tag and the extra description to understand the test/benchmark.

Summary lines for each test now consist of the following:
    1. A terse tag (for tools to parse)
    2. A more human oriented description.
    3. Number of cycles.
    4. Number of nanoseconds.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
Peter Mitsis 2024-01-17 13:17:07 -05:00 committed by Carles Cufí
parent db58810492
commit d3a3d63afb
11 changed files with 356 additions and 264 deletions

View file

@ -16,10 +16,10 @@ including:
* Time it takes to suspend a thread
* Time it takes to resume a suspended thread
* Time it takes to abort a thread
* Time it takes to add data to a FIFO/LIFO
* Time it takes to retrieve data from a FIFO/LIFO
* Time it takes to wait on a FIFO/LIFO (and context switch)
* Time it takes to wake and switch to a thread waiting on a FIFO/LIFO
* Time it takes to add data to a FIFO/LIFO
* Time it takes to retrieve data from a FIFO/LIFO
* Time it takes to wait on a FIFO/LIFO (and context switch)
* Time it takes to wake and switch to a thread waiting on a FIFO/LIFO
* Time it takes to send and receive events
* Time it takes to wait for events (and context switch)
* Time it takes to wake and switch to a thread waiting for events
@ -36,157 +36,158 @@ threads:
Sample output of the benchmark (without userspace enabled)::
*** Booting Zephyr OS build zephyr-v3.5.0-3537-g5dbe0ce2622d ***
THREAD yield.preemptive.ctx.(K -> K) : 344 cycles , 2866 ns :
THREAD yield.cooperative.ctx.(K -> K) : 344 cycles , 2867 ns :
ISR resume.interrupted.thread.kernel : 498 cycles , 4158 ns :
ISR resume.different.thread.kernel : 383 cycles , 3199 ns :
THREAD create.kernel.from.kernel : 401 cycles , 3349 ns :
THREAD start.kernel.from.kernel : 418 cycles , 3491 ns :
THREAD suspend.kernel.from.kernel : 433 cycles , 3616 ns :
THREAD resume.kernel.from.kernel : 351 cycles , 2933 ns :
THREAD abort.kernel.from.kernel : 349 cycles , 2909 ns :
FIFO put.immediate.kernel : 294 cycles , 2450 ns :
FIFO get.immediate.kernel : 135 cycles , 1133 ns :
FIFO put.alloc.immediate.kernel : 906 cycles , 7550 ns :
FIFO get.free.immediate.kernel : 570 cycles , 4750 ns :
FIFO get.blocking.(K -> K) : 545 cycles , 4542 ns :
FIFO put.wake+ctx.(K -> K) : 675 cycles , 5625 ns :
FIFO get.free.blocking.(K -> K) : 555 cycles , 4625 ns :
FIFO put.alloc.wake+ctx.(K -> K) : 670 cycles , 5583 ns :
LIFO put.immediate.kernel : 282 cycles , 2350 ns :
LIFO get.immediate.kernel : 135 cycles , 1133 ns :
LIFO put.alloc.immediate.kernel : 903 cycles , 7526 ns :
LIFO get.free.immediate.kernel : 570 cycles , 4750 ns :
LIFO get.blocking.(K -> K) : 542 cycles , 4524 ns :
LIFO put.wake+ctx.(K -> K) : 670 cycles , 5584 ns :
LIFO get.free.blocking.(K -> K) : 547 cycles , 4558 ns :
LIFO put.alloc.wake+ctx.(K -> K) : 670 cycles , 5583 ns :
EVENTS post.immediate.kernel : 220 cycles , 1833 ns :
EVENTS set.immediate.kernel : 225 cycles , 1875 ns :
EVENTS wait.immediate.kernel : 125 cycles , 1041 ns :
EVENTS wait_all.immediate.kernel : 145 cycles , 1208 ns :
EVENTS wait.blocking.(K -> K) : 594 cycles , 4958 ns :
EVENTS set.wake+ctx.(K -> K) : 774 cycles , 6451 ns :
EVENTS wait_all.blocking.(K -> K) : 605 cycles , 5042 ns :
EVENTS post.wake+ctx.(K -> K) : 785 cycles , 6542 ns :
SEMAPHORE give.immediate.kernel : 165 cycles , 1375 ns :
SEMAPHORE take.immediate.kernel : 69 cycles , 575 ns :
SEMAPHORE take.blocking.(K -> K) : 489 cycles , 4075 ns :
SEMAPHORE give.wake+ctx.(K -> K) : 604 cycles , 5033 ns :
MUTEX lock.immediate.recursive.kernel : 115 cycles , 958 ns :
MUTEX unlock.immediate.recursive.kernel : 40 cycles , 333 ns :
HEAP malloc.immediate : 615 cycles , 5125 ns :
HEAP free.immediate : 431 cycles , 3591 ns :
*** Booting Zephyr OS build zephyr-v3.5.0-4267-g6ccdc31233a3 ***
thread.yield.preemptive.ctx.k_to_k - Context switch via k_yield : 329 cycles , 2741 ns :
thread.yield.cooperative.ctx.k_to_k - Context switch via k_yield : 329 cycles , 2741 ns :
isr.resume.interrupted.thread.kernel - Return from ISR to interrupted thread : 363 cycles , 3033 ns :
isr.resume.different.thread.kernel - Return from ISR to another thread : 404 cycles , 3367 ns :
thread.create.kernel.from.kernel - Create thread : 404 cycles , 3374 ns :
thread.start.kernel.from.kernel - Start thread : 423 cycles , 3533 ns :
thread.suspend.kernel.from.kernel - Suspend thread : 428 cycles , 3574 ns :
thread.resume.kernel.from.kernel - Resume thread : 350 cycles , 2924 ns :
thread.abort.kernel.from.kernel - Abort thread : 339 cycles , 2826 ns :
fifo.put.immediate.kernel - Add data to FIFO (no ctx switch) : 269 cycles , 2242 ns :
fifo.get.immediate.kernel - Get data from FIFO (no ctx switch) : 128 cycles , 1074 ns :
fifo.put.alloc.immediate.kernel - Allocate to add data to FIFO (no ctx switch) : 945 cycles , 7875 ns :
fifo.get.free.immediate.kernel - Free when getting data from FIFO (no ctx switch) : 575 cycles , 4792 ns :
fifo.get.blocking.k_to_k - Get data from FIFO (w/ ctx switch) : 551 cycles , 4592 ns :
fifo.put.wake+ctx.k_to_k - Add data to FIFO (w/ ctx switch) : 660 cycles , 5500 ns :
fifo.get.free.blocking.k_to_k - Free when getting data from FIFO (w/ ctx switch) : 553 cycles , 4608 ns :
fifo.put.alloc.wake+ctx.k_to_k - Allocate to add data to FIFO (w/ ctx switch) : 655 cycles , 5458 ns :
lifo.put.immediate.kernel - Add data to LIFO (no ctx switch) : 280 cycles , 2341 ns :
lifo.get.immediate.kernel - Get data from LIFO (no ctx switch) : 133 cycles , 1116 ns :
lifo.put.alloc.immediate.kernel - Allocate to add data to LIFO (no ctx switch) : 945 cycles , 7875 ns :
lifo.get.free.immediate.kernel - Free when getting data from LIFO (no ctx switch) : 580 cycles , 4833 ns :
lifo.get.blocking.k_to_k - Get data from LIFO (w/ ctx switch) : 553 cycles , 4608 ns :
lifo.put.wake+ctx.k_to_k - Add data to LIFO (w/ ctx switch) : 655 cycles , 5458 ns :
lifo.get.free.blocking.k_to_k - Free when getting data from LIFO (w/ ctx switch) : 550 cycles , 4583 ns :
lifo.put.alloc.wake+ctx.k_to_k - Allocate to add data to LIFO (w/ ctx switch) : 655 cycles , 5458 ns :
events.post.immediate.kernel - Post events (nothing wakes) : 225 cycles , 1875 ns :
events.set.immediate.kernel - Set events (nothing wakes) : 225 cycles , 1875 ns :
events.wait.immediate.kernel - Wait for any events (no ctx switch) : 130 cycles , 1083 ns :
events.wait_all.immediate.kernel - Wait for all events (no ctx switch) : 135 cycles , 1125 ns :
events.wait.blocking.k_to_k - Wait for any events (w/ ctx switch) : 573 cycles , 4783 ns :
events.set.wake+ctx.k_to_k - Set events (w/ ctx switch) : 784 cycles , 6534 ns :
events.wait_all.blocking.k_to_k - Wait for all events (w/ ctx switch) : 589 cycles , 4916 ns :
events.post.wake+ctx.k_to_k - Post events (w/ ctx switch) : 795 cycles , 6626 ns :
semaphore.give.immediate.kernel - Give a semaphore (no waiters) : 125 cycles , 1041 ns :
semaphore.take.immediate.kernel - Take a semaphore (no blocking) : 69 cycles , 575 ns :
semaphore.take.blocking.k_to_k - Take a semaphore (context switch) : 494 cycles , 4116 ns :
semaphore.give.wake+ctx.k_to_k - Give a semaphore (context switch) : 599 cycles , 4992 ns :
mutex.lock.immediate.recursive.kernel - Lock a mutex : 100 cycles , 833 ns :
mutex.unlock.immediate.recursive.kernel - Unlock a mutex : 40 cycles , 333 ns :
heap.malloc.immediate - Average time for heap malloc : 627 cycles , 5225 ns :
heap.free.immediate - Average time for heap free : 432 cycles , 3600 ns :
===================================================================
PROJECT EXECUTION SUCCESSFUL
Sample output of the benchmark (with userspace enabled)::
*** Booting Zephyr OS build zephyr-v3.5.0-3537-g5dbe0ce2622d ***
THREAD yield.preemptive.ctx.(K -> K) : 990 cycles , 8250 ns :
THREAD yield.preemptive.ctx.(U -> U) : 1285 cycles , 10712 ns :
THREAD yield.preemptive.ctx.(K -> U) : 1178 cycles , 9817 ns :
THREAD yield.preemptive.ctx.(U -> K) : 1097 cycles , 9145 ns :
THREAD yield.cooperative.ctx.(K -> K) : 990 cycles , 8250 ns :
THREAD yield.cooperative.ctx.(U -> U) : 1285 cycles , 10712 ns :
THREAD yield.cooperative.ctx.(K -> U) : 1178 cycles , 9817 ns :
THREAD yield.cooperative.ctx.(U -> K) : 1097 cycles , 9146 ns :
ISR resume.interrupted.thread.kernel : 1120 cycles , 9333 ns :
ISR resume.different.thread.kernel : 1010 cycles , 8417 ns :
ISR resume.different.thread.user : 1207 cycles , 10062 ns :
THREAD create.kernel.from.kernel : 955 cycles , 7958 ns :
THREAD start.kernel.from.kernel : 1095 cycles , 9126 ns :
THREAD suspend.kernel.from.kernel : 1064 cycles , 8874 ns :
THREAD resume.kernel.from.kernel : 999 cycles , 8333 ns :
THREAD abort.kernel.from.kernel : 2280 cycles , 19000 ns :
THREAD create.user.from.kernel : 822 cycles , 6855 ns :
THREAD start.user.from.kernel : 6572 cycles , 54774 ns :
THREAD suspend.user.from.kernel : 1422 cycles , 11857 ns :
THREAD resume.user.from.kernel : 1177 cycles , 9812 ns :
THREAD abort.user.from.kernel : 2147 cycles , 17897 ns :
THREAD create.user.from.user : 2105 cycles , 17542 ns :
THREAD start.user.from.user : 6960 cycles , 58002 ns :
THREAD suspend user.from.user : 1610 cycles , 13417 ns :
THREAD resume user.from.user : 1565 cycles , 13042 ns :
THREAD abort user.from.user : 2780 cycles , 23167 ns :
THREAD start.kernel.from.user : 1482 cycles , 12353 ns :
THREAD suspend.kernel.from.user : 1252 cycles , 10437 ns :
THREAD resume.kernel.from.user : 1387 cycles , 11564 ns :
THREAD abort.kernel.from.user : 2912 cycles , 24272 ns :
FIFO put.immediate.kernel : 314 cycles , 2624 ns :
FIFO get.immediate.kernel : 215 cycles , 1792 ns :
FIFO put.alloc.immediate.kernel : 1025 cycles , 8541 ns :
FIFO get.free.immediate.kernel : 655 cycles , 5458 ns :
FIFO put.alloc.immediate.user : 1740 cycles , 14500 ns :
FIFO get.free.immediate.user : 1410 cycles , 11751 ns :
FIFO get.blocking.(K -> K) : 1249 cycles , 10416 ns :
FIFO put.wake+ctx.(K -> K) : 1320 cycles , 11000 ns :
FIFO get.free.blocking.(K -> K) : 1235 cycles , 10292 ns :
FIFO put.alloc.wake+ctx.(K -> K) : 1355 cycles , 11292 ns :
FIFO get.free.blocking.(U -> K) : 1750 cycles , 14584 ns :
FIFO put.alloc.wake+ctx.(K -> U) : 1680 cycles , 14001 ns :
FIFO get.free.blocking.(K -> U) : 1555 cycles , 12959 ns :
FIFO put.alloc.wake+ctx.(U -> K) : 1845 cycles , 15375 ns :
FIFO get.free.blocking.(U -> U) : 2070 cycles , 17251 ns :
FIFO put.alloc.wake+ctx.(U -> U) : 2170 cycles , 18084 ns :
LIFO put.immediate.kernel : 299 cycles , 2499 ns :
LIFO get.immediate.kernel : 204 cycles , 1708 ns :
LIFO put.alloc.immediate.kernel : 1015 cycles , 8459 ns :
LIFO get.free.immediate.kernel : 645 cycles , 5375 ns :
LIFO put.alloc.immediate.user : 1760 cycles , 14668 ns :
LIFO get.free.immediate.user : 1400 cycles , 11667 ns :
LIFO get.blocking.(K -> K) : 1234 cycles , 10291 ns :
LIFO put.wake+ctx.(K -> K) : 1315 cycles , 10959 ns :
LIFO get.free.blocking.(K -> K) : 1230 cycles , 10251 ns :
LIFO put.alloc.wake+ctx.(K -> K) : 1345 cycles , 11208 ns :
LIFO get.free.blocking.(U -> K) : 1745 cycles , 14544 ns :
LIFO put.alloc.wake+ctx.(K -> U) : 1680 cycles , 14000 ns :
LIFO get.free.blocking.(K -> U) : 1555 cycles , 12958 ns :
LIFO put.alloc.wake+ctx.(U -> K) : 1855 cycles , 15459 ns :
LIFO get.free.blocking.(U -> U) : 2070 cycles , 17251 ns :
LIFO put.alloc.wake+ctx.(U -> U) : 2190 cycles , 18251 ns :
EVENTS post.immediate.kernel : 285 cycles , 2375 ns :
EVENTS set.immediate.kernel : 285 cycles , 2375 ns :
EVENTS wait.immediate.kernel : 215 cycles , 1791 ns :
EVENTS wait_all.immediate.kernel : 215 cycles , 1791 ns :
EVENTS post.immediate.user : 775 cycles , 6459 ns :
EVENTS set.immediate.user : 780 cycles , 6500 ns :
EVENTS wait.immediate.user : 715 cycles , 5959 ns :
EVENTS wait_all.immediate.user : 720 cycles , 6000 ns :
EVENTS wait.blocking.(K -> K) : 1212 cycles , 10108 ns :
EVENTS set.wake+ctx.(K -> K) : 1450 cycles , 12084 ns :
EVENTS wait_all.blocking.(K -> K) : 1260 cycles , 10500 ns :
EVENTS post.wake+ctx.(K -> K) : 1490 cycles , 12417 ns :
EVENTS wait.blocking.(U -> K) : 1577 cycles , 13145 ns :
EVENTS set.wake+ctx.(K -> U) : 1617 cycles , 13479 ns :
EVENTS wait_all.blocking.(U -> K) : 1760 cycles , 14667 ns :
EVENTS post.wake+ctx.(K -> U) : 1790 cycles , 14917 ns :
EVENTS wait.blocking.(K -> U) : 1400 cycles , 11671 ns :
EVENTS set.wake+ctx.(U -> K) : 1812 cycles , 15104 ns :
EVENTS wait_all.blocking.(K -> U) : 1580 cycles , 13167 ns :
EVENTS post.wake+ctx.(U -> K) : 1985 cycles , 16542 ns :
EVENTS wait.blocking.(U -> U) : 1765 cycles , 14709 ns :
EVENTS set.wake+ctx.(U -> U) : 1979 cycles , 16499 ns :
EVENTS wait_all.blocking.(U -> U) : 2080 cycles , 17334 ns :
EVENTS post.wake+ctx.(U -> U) : 2285 cycles , 19043 ns :
SEMAPHORE give.immediate.kernel : 210 cycles , 1750 ns :
SEMAPHORE take.immediate.kernel : 145 cycles , 1208 ns :
SEMAPHORE give.immediate.user : 715 cycles , 5959 ns :
SEMAPHORE take.immediate.user : 660 cycles , 5500 ns :
SEMAPHORE take.blocking.(K -> K) : 1150 cycles , 9584 ns :
SEMAPHORE give.wake+ctx.(K -> K) : 1279 cycles , 10666 ns :
SEMAPHORE take.blocking.(K -> U) : 1343 cycles , 11192 ns :
SEMAPHORE give.wake+ctx.(U -> K) : 1637 cycles , 13645 ns :
SEMAPHORE take.blocking.(U -> K) : 1522 cycles , 12688 ns :
SEMAPHORE give.wake+ctx.(K -> U) : 1472 cycles , 12270 ns :
SEMAPHORE take.blocking.(U -> U) : 1715 cycles , 14296 ns :
SEMAPHORE give.wake+ctx.(U -> U) : 1830 cycles , 15250 ns :
MUTEX lock.immediate.recursive.kernel : 150 cycles , 1250 ns :
MUTEX unlock.immediate.recursive.kernel : 57 cycles , 475 ns :
MUTEX lock.immediate.recursive.user : 670 cycles , 5583 ns :
MUTEX unlock.immediate.recursive.user : 595 cycles , 4959 ns :
HEAP malloc.immediate : 629 cycles , 5241 ns :
HEAP free.immediate : 414 cycles , 3450 ns :
*** Booting Zephyr OS build zephyr-v3.5.0-4268-g6af7a1230a08 ***
thread.yield.preemptive.ctx.k_to_k - Context switch via k_yield : 970 cycles , 8083 ns :
thread.yield.preemptive.ctx.u_to_u - Context switch via k_yield : 1260 cycles , 10506 ns :
thread.yield.preemptive.ctx.k_to_u - Context switch via k_yield : 1155 cycles , 9632 ns :
thread.yield.preemptive.ctx.u_to_k - Context switch via k_yield : 1075 cycles , 8959 ns :
thread.yield.cooperative.ctx.k_to_k - Context switch via k_yield : 970 cycles , 8083 ns :
thread.yield.cooperative.ctx.u_to_u - Context switch via k_yield : 1260 cycles , 10506 ns :
thread.yield.cooperative.ctx.k_to_u - Context switch via k_yield : 1155 cycles , 9631 ns :
thread.yield.cooperative.ctx.u_to_k - Context switch via k_yield : 1075 cycles , 8959 ns :
isr.resume.interrupted.thread.kernel - Return from ISR to interrupted thread : 415 cycles , 3458 ns :
isr.resume.different.thread.kernel - Return from ISR to another thread : 985 cycles , 8208 ns :
isr.resume.different.thread.user - Return from ISR to another thread : 1180 cycles , 9833 ns :
thread.create.kernel.from.kernel - Create thread : 989 cycles , 8249 ns :
thread.start.kernel.from.kernel - Start thread : 1059 cycles , 8833 ns :
thread.suspend.kernel.from.kernel - Suspend thread : 1030 cycles , 8583 ns :
thread.resume.kernel.from.kernel - Resume thread : 994 cycles , 8291 ns :
thread.abort.kernel.from.kernel - Abort thread : 2370 cycles , 19751 ns :
thread.create.user.from.kernel - Create thread : 860 cycles , 7167 ns :
thread.start.user.from.kernel - Start thread : 8965 cycles , 74713 ns :
thread.suspend.user.from.kernel - Suspend thread : 1400 cycles , 11666 ns :
thread.resume.user.from.kernel - Resume thread : 1174 cycles , 9791 ns :
thread.abort.user.from.kernel - Abort thread : 2240 cycles , 18666 ns :
thread.create.user.from.user - Create thread : 2105 cycles , 17542 ns :
thread.start.user.from.user - Start thread : 9345 cycles , 77878 ns :
thread.suspend.user.from.user - Suspend thread : 1590 cycles , 13250 ns :
thread.resume.user.from.user - Resume thread : 1534 cycles , 12791 ns :
thread.abort.user.from.user - Abort thread : 2850 cycles , 23750 ns :
thread.start.kernel.from.user - Start thread : 1440 cycles , 12000 ns :
thread.suspend.kernel.from.user - Suspend thread : 1219 cycles , 10166 ns :
thread.resume.kernel.from.user - Resume thread : 1355 cycles , 11292 ns :
thread.abort.kernel.from.user - Abort thread : 2980 cycles , 24834 ns :
fifo.put.immediate.kernel - Add data to FIFO (no ctx switch) : 315 cycles , 2625 ns :
fifo.get.immediate.kernel - Get data from FIFO (no ctx switch) : 209 cycles , 1749 ns :
fifo.put.alloc.immediate.kernel - Allocate to add data to FIFO (no ctx switch) : 1040 cycles , 8667 ns :
fifo.get.free.immediate.kernel - Free when getting data from FIFO (no ctx switch) : 670 cycles , 5583 ns :
fifo.put.alloc.immediate.user - Allocate to add data to FIFO (no ctx switch) : 1765 cycles , 14709 ns :
fifo.get.free.immediate.user - Free when getting data from FIFO (no ctx switch) : 1410 cycles , 11750 ns :
fifo.get.blocking.k_to_k - Get data from FIFO (w/ ctx switch) : 1220 cycles , 10168 ns :
fifo.put.wake+ctx.k_to_k - Add data to FIFO (w/ ctx switch) : 1285 cycles , 10708 ns :
fifo.get.free.blocking.k_to_k - Free when getting data from FIFO (w/ ctx switch) : 1235 cycles , 10291 ns :
fifo.put.alloc.wake+ctx.k_to_k - Allocate to add data to FIFO (w/ ctx switch) : 1340 cycles , 11167 ns :
fifo.get.free.blocking.u_to_k - Free when getting data from FIFO (w/ ctx switch) : 1715 cycles , 14292 ns :
fifo.put.alloc.wake+ctx.k_to_u - Allocate to add data to FIFO (w/ ctx switch) : 1665 cycles , 13876 ns :
fifo.get.free.blocking.k_to_u - Free when getting data from FIFO (w/ ctx switch) : 1565 cycles , 13042 ns :
fifo.put.alloc.wake+ctx.u_to_k - Allocate to add data to FIFO (w/ ctx switch) : 1815 cycles , 15126 ns :
fifo.get.free.blocking.u_to_u - Free when getting data from FIFO (w/ ctx switch) : 2045 cycles , 17042 ns :
fifo.put.alloc.wake+ctx.u_to_u - Allocate to add data to FIFO (w/ ctx switch) : 2140 cycles , 17834 ns :
lifo.put.immediate.kernel - Add data to LIFO (no ctx switch) : 309 cycles , 2583 ns :
lifo.get.immediate.kernel - Get data from LIFO (no ctx switch) : 219 cycles , 1833 ns :
lifo.put.alloc.immediate.kernel - Allocate to add data to LIFO (no ctx switch) : 1030 cycles , 8583 ns :
lifo.get.free.immediate.kernel - Free when getting data from LIFO (no ctx switch) : 685 cycles , 5708 ns :
lifo.put.alloc.immediate.user - Allocate to add data to LIFO (no ctx switch) : 1755 cycles , 14625 ns :
lifo.get.free.immediate.user - Free when getting data from LIFO (no ctx switch) : 1405 cycles , 11709 ns :
lifo.get.blocking.k_to_k - Get data from LIFO (w/ ctx switch) : 1229 cycles , 10249 ns :
lifo.put.wake+ctx.k_to_k - Add data to LIFO (w/ ctx switch) : 1290 cycles , 10751 ns :
lifo.get.free.blocking.k_to_k - Free when getting data from LIFO (w/ ctx switch) : 1235 cycles , 10292 ns :
lifo.put.alloc.wake+ctx.k_to_k - Allocate to add data to LIFO (w/ ctx switch) : 1310 cycles , 10917 ns :
lifo.get.free.blocking.u_to_k - Free when getting data from LIFO (w/ ctx switch) : 1715 cycles , 14293 ns :
lifo.put.alloc.wake+ctx.k_to_u - Allocate to add data to LIFO (w/ ctx switch) : 1630 cycles , 13583 ns :
lifo.get.free.blocking.k_to_u - Free when getting data from LIFO (w/ ctx switch) : 1554 cycles , 12958 ns :
lifo.put.alloc.wake+ctx.u_to_k - Allocate to add data to LIFO (w/ ctx switch) : 1805 cycles , 15043 ns :
lifo.get.free.blocking.u_to_u - Free when getting data from LIFO (w/ ctx switch) : 2035 cycles , 16959 ns :
lifo.put.alloc.wake+ctx.u_to_u - Allocate to add data to LIFO (w/ ctx switch) : 2125 cycles , 17709 ns :
events.post.immediate.kernel - Post events (nothing wakes) : 295 cycles , 2458 ns :
events.set.immediate.kernel - Set events (nothing wakes) : 300 cycles , 2500 ns :
events.wait.immediate.kernel - Wait for any events (no ctx switch) : 220 cycles , 1833 ns :
events.wait_all.immediate.kernel - Wait for all events (no ctx switch) : 215 cycles , 1791 ns :
events.post.immediate.user - Post events (nothing wakes) : 795 cycles , 6625 ns :
events.set.immediate.user - Set events (nothing wakes) : 790 cycles , 6584 ns :
events.wait.immediate.user - Wait for any events (no ctx switch) : 740 cycles , 6167 ns :
events.wait_all.immediate.user - Wait for all events (no ctx switch) : 740 cycles , 6166 ns :
events.wait.blocking.k_to_k - Wait for any events (w/ ctx switch) : 1190 cycles , 9918 ns :
events.set.wake+ctx.k_to_k - Set events (w/ ctx switch) : 1464 cycles , 12208 ns :
events.wait_all.blocking.k_to_k - Wait for all events (w/ ctx switch) : 1235 cycles , 10292 ns :
events.post.wake+ctx.k_to_k - Post events (w/ ctx switch) : 1500 cycles , 12500 ns :
events.wait.blocking.u_to_k - Wait for any events (w/ ctx switch) : 1580 cycles , 13167 ns :
events.set.wake+ctx.k_to_u - Set events (w/ ctx switch) : 1630 cycles , 13583 ns :
events.wait_all.blocking.u_to_k - Wait for all events (w/ ctx switch) : 1765 cycles , 14708 ns :
events.post.wake+ctx.k_to_u - Post events (w/ ctx switch) : 1795 cycles , 14960 ns :
events.wait.blocking.k_to_u - Wait for any events (w/ ctx switch) : 1375 cycles , 11459 ns :
events.set.wake+ctx.u_to_k - Set events (w/ ctx switch) : 1825 cycles , 15209 ns :
events.wait_all.blocking.k_to_u - Wait for all events (w/ ctx switch) : 1555 cycles , 12958 ns :
events.post.wake+ctx.u_to_k - Post events (w/ ctx switch) : 1995 cycles , 16625 ns :
events.wait.blocking.u_to_u - Wait for any events (w/ ctx switch) : 1765 cycles , 14708 ns :
events.set.wake+ctx.u_to_u - Set events (w/ ctx switch) : 1989 cycles , 16583 ns :
events.wait_all.blocking.u_to_u - Wait for all events (w/ ctx switch) : 2085 cycles , 17376 ns :
events.post.wake+ctx.u_to_u - Post events (w/ ctx switch) : 2290 cycles , 19084 ns :
semaphore.give.immediate.kernel - Give a semaphore (no waiters) : 220 cycles , 1833 ns :
semaphore.take.immediate.kernel - Take a semaphore (no blocking) : 130 cycles , 1083 ns :
semaphore.give.immediate.user - Give a semaphore (no waiters) : 710 cycles , 5917 ns :
semaphore.take.immediate.user - Take a semaphore (no blocking) : 655 cycles , 5458 ns :
semaphore.take.blocking.k_to_k - Take a semaphore (context switch) : 1135 cycles , 9458 ns :
semaphore.give.wake+ctx.k_to_k - Give a semaphore (context switch) : 1244 cycles , 10374 ns :
semaphore.take.blocking.k_to_u - Take a semaphore (context switch) : 1325 cycles , 11048 ns :
semaphore.give.wake+ctx.u_to_k - Give a semaphore (context switch) : 1610 cycles , 13416 ns :
semaphore.take.blocking.u_to_k - Take a semaphore (context switch) : 1499 cycles , 12499 ns :
semaphore.give.wake+ctx.k_to_u - Give a semaphore (context switch) : 1434 cycles , 11957 ns :
semaphore.take.blocking.u_to_u - Take a semaphore (context switch) : 1690 cycles , 14090 ns :
semaphore.give.wake+ctx.u_to_u - Give a semaphore (context switch) : 1800 cycles , 15000 ns :
mutex.lock.immediate.recursive.kernel - Lock a mutex : 155 cycles , 1291 ns :
mutex.unlock.immediate.recursive.kernel - Unlock a mutex : 57 cycles , 475 ns :
mutex.lock.immediate.recursive.user - Lock a mutex : 665 cycles , 5541 ns :
mutex.unlock.immediate.recursive.user - Unlock a mutex : 585 cycles , 4875 ns :
heap.malloc.immediate - Average time for heap malloc : 640 cycles , 5341 ns :
heap.free.immediate - Average time for heap free : 436 cycles , 3633 ns :
===================================================================
PROJECT EXECUTION SUCCESSFUL

View file

@ -33,7 +33,8 @@ static void event_ops_entry(void *p1, void *p2, void *p3)
timing_t finish;
uint32_t i;
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
k_event_clear(&event_set, ALL_EVENTS);
@ -43,9 +44,10 @@ static void event_ops_entry(void *p1, void *p2, void *p3)
}
finish = timing_timestamp_get();
snprintf(description, sizeof(description),
"EVENTS post.immediate.%s",
snprintf(tag, sizeof(tag), "events.post.immediate.%s",
(options & K_USER) ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Post events (nothing wakes)", tag);
cycles = timing_cycles_get(&start, &finish);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -56,9 +58,10 @@ static void event_ops_entry(void *p1, void *p2, void *p3)
}
finish = timing_timestamp_get();
snprintf(description, sizeof(description),
"EVENTS set.immediate.%s",
snprintf(tag, sizeof(tag), "events.set.immediate.%s",
(options & K_USER) ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Set events (nothing wakes)", tag);
cycles = timing_cycles_get(&start, &finish);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -69,9 +72,10 @@ static void event_ops_entry(void *p1, void *p2, void *p3)
}
finish = timing_timestamp_get();
snprintf(description, sizeof(description),
"EVENTS wait.immediate.%s",
snprintf(tag, sizeof(tag), "events.wait.immediate.%s",
(options & K_USER) ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Wait for any events (no ctx switch)", tag);
cycles = timing_cycles_get(&start, &finish);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -82,9 +86,10 @@ static void event_ops_entry(void *p1, void *p2, void *p3)
}
finish = timing_timestamp_get();
snprintf(description, sizeof(description),
"EVENTS wait_all.immediate.%s",
snprintf(tag, sizeof(tag), "events.wait_all.immediate.%s",
(options & K_USER) ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Wait for all events (no ctx switch)", tag);
cycles = timing_cycles_get(&start, &finish);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -97,7 +102,8 @@ static void start_thread_entry(void *p1, void *p2, void *p3)
uint32_t alt_options = (uint32_t)(uintptr_t)p3;
uint32_t i;
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
k_thread_start(&alt_thread);
@ -109,20 +115,24 @@ static void start_thread_entry(void *p1, void *p2, void *p3)
k_event_set(&event_set, BENCH_EVENT_SET);
}
snprintf(tag, sizeof(tag),
"events.wait.blocking.%c_to_%c",
(alt_options & K_USER) ? 'u' : 'k',
(options & K_USER) ? 'u' : 'k');
snprintf(description, sizeof(description),
"EVENTS wait.blocking.(%c -> %c)",
(alt_options & K_USER) ? 'U' : 'K',
(options & K_USER) ? 'U' : 'K');
"%-40s - Wait for any events (w/ ctx switch)", tag);
cycles = timestamp.cycles -
timestamp_overhead_adjustment(options, alt_options);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(tag, sizeof(tag),
"events.set.wake+ctx.%c_to_%c",
(options & K_USER) ? 'u' : 'k',
(alt_options & K_USER) ? 'u' : 'k');
snprintf(description, sizeof(description),
"EVENTS set.wake+ctx.(%c -> %c)",
(options & K_USER) ? 'U' : 'K',
(alt_options & K_USER) ? 'U' : 'K');
"%-40s - Set events (w/ ctx switch)", tag);
cycles = timestamp.cycles -
timestamp_overhead_adjustment(options, alt_options);
PRINT_STATS_AVG(description, (uint32_t)cycles,
@ -137,19 +147,23 @@ static void start_thread_entry(void *p1, void *p2, void *p3)
k_event_post(&event_set, BENCH_EVENT_SET);
}
snprintf(tag, sizeof(tag),
"events.wait_all.blocking.%c_to_%c",
(alt_options & K_USER) ? 'u' : 'k',
(options & K_USER) ? 'u' : 'k');
snprintf(description, sizeof(description),
"EVENTS wait_all.blocking.(%c -> %c)",
(alt_options & K_USER) ? 'U' : 'K',
(options & K_USER) ? 'U' : 'K');
"%-40s - Wait for all events (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(tag, sizeof(tag),
"events.post.wake+ctx.%c_to_%c",
(options & K_USER) ? 'u' : 'k',
(alt_options & K_USER) ? 'u' : 'k');
snprintf(description, sizeof(description),
"EVENTS post.wake+ctx.(%c -> %c)",
(options & K_USER) ? 'U' : 'K',
(alt_options & K_USER) ? 'U' : 'K');
"%-40s - Post events (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");

View file

@ -93,7 +93,8 @@ int fifo_ops(uint32_t num_iterations, uint32_t options)
{
int priority;
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
priority = k_thread_priority_get(k_current_get());
@ -111,9 +112,11 @@ int fifo_ops(uint32_t num_iterations, uint32_t options)
k_thread_start(&start_thread);
if ((options & K_USER) == 0) {
snprintf(description, sizeof(description),
"FIFO put.immediate.%s",
snprintf(tag, sizeof(tag),
"fifo.put.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Add data to FIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
cycles -= timestamp_overhead_adjustment(options, options);
@ -121,9 +124,11 @@ int fifo_ops(uint32_t num_iterations, uint32_t options)
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"FIFO get.immediate.%s",
snprintf(tag, sizeof(tag),
"fifo.get.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Get data from FIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
cycles -= timestamp_overhead_adjustment(options, options);
PRINT_STATS_AVG(description, (uint32_t)cycles,
@ -131,18 +136,22 @@ int fifo_ops(uint32_t num_iterations, uint32_t options)
k_sem_give(&pause_sem);
}
snprintf(description, sizeof(description),
"FIFO put.alloc.immediate.%s",
snprintf(tag, sizeof(tag),
"fifo.put.alloc.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Allocate to add data to FIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"FIFO get.free.immediate.%s",
snprintf(tag, sizeof(tag),
"fifo.get.free.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Free when getting data from FIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -258,7 +267,8 @@ int fifo_blocking_ops(uint32_t num_iterations, uint32_t start_options,
{
int priority;
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
priority = k_thread_priority_get(k_current_get());
@ -284,40 +294,48 @@ int fifo_blocking_ops(uint32_t num_iterations, uint32_t start_options,
k_thread_start(&start_thread);
if (((start_options | alt_options) & K_USER) == 0) {
snprintf(tag, sizeof(tag),
"fifo.get.blocking.%s_to_%s",
alt_options & K_USER ? "u" : "k",
start_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"FIFO get.blocking.(%s -> %s)",
alt_options & K_USER ? "U" : "K",
start_options & K_USER ? "U" : "K");
"%-40s - Get data from FIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(tag, sizeof(tag),
"fifo.put.wake+ctx.%s_to_%s",
start_options & K_USER ? "u" : "k",
alt_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"FIFO put.wake+ctx.(%s -> %s)",
start_options & K_USER ? "U" : "K",
alt_options & K_USER ? "U" : "K");
"%-40s - Add data to FIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
}
snprintf(tag, sizeof(tag),
"fifo.get.free.blocking.%s_to_%s",
alt_options & K_USER ? "u" : "k",
start_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"FIFO get.free.blocking.(%s -> %s)",
alt_options & K_USER ? "U" : "K",
start_options & K_USER ? "U" : "K");
"%-40s - Free when getting data from FIFO (w/ ctx siwtch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(tag, sizeof(tag),
"fifo.put.alloc.wake+ctx.%s_to_%s",
start_options & K_USER ? "u" : "k",
alt_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"FIFO put.alloc.wake+ctx.(%s -> %s)",
start_options & K_USER ? "U" : "K",
alt_options & K_USER ? "U" : "K");
"%-40s - Allocate to add data to FIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");

View file

@ -25,6 +25,7 @@ void heap_malloc_free(void)
bool failed = false;
char error_string[80];
char description[120];
const char *notes = "";
timing_start();
@ -63,10 +64,15 @@ void heap_malloc_free(void)
notes = "Memory heap too small--increase it.";
}
PRINT_STATS_AVG("HEAP malloc.immediate", sum_malloc, count,
failed, notes);
PRINT_STATS_AVG("HEAP free.immediate", sum_free, count,
failed, notes);
snprintf(description, sizeof(description),
"%-40s - Average time for heap malloc",
"heap.malloc.immediate");
PRINT_STATS_AVG(description, sum_malloc, count, failed, notes);
snprintf(description, sizeof(description),
"%-40s - Average time for heap free",
"heap.free.immediate");
PRINT_STATS_AVG(description, sum_free, count, failed, notes);
timing_stop();
}

View file

@ -166,6 +166,7 @@ static void int_to_another_thread(uint32_t num_iterations, uint64_t *sum,
int int_to_thread(uint32_t num_iterations)
{
uint64_t sum;
char description[120];
timing_start();
TICK_SYNCH();
@ -174,8 +175,10 @@ int int_to_thread(uint32_t num_iterations)
sum -= timestamp_overhead_adjustment(0, 0);
PRINT_STATS_AVG("ISR resume.interrupted.thread.kernel",
(uint32_t)sum, num_iterations, false, "");
snprintf(description, sizeof(description),
"%-40s - Return from ISR to interrupted thread",
"isr.resume.interrupted.thread.kernel");
PRINT_STATS_AVG(description, (uint32_t)sum, num_iterations, false, "");
/* ************** */
@ -183,8 +186,10 @@ int int_to_thread(uint32_t num_iterations)
sum -= timestamp_overhead_adjustment(0, 0);
PRINT_STATS_AVG("ISR resume.different.thread.kernel",
(uint32_t)sum, num_iterations, false, "");
snprintf(description, sizeof(description),
"%-40s - Return from ISR to another thread",
"isr.resume.different.thread.kernel");
PRINT_STATS_AVG(description, (uint32_t)sum, num_iterations, false, "");
/* ************** */
@ -193,8 +198,10 @@ int int_to_thread(uint32_t num_iterations)
sum -= timestamp_overhead_adjustment(0, K_USER);
PRINT_STATS_AVG("ISR resume.different.thread.user",
(uint32_t)sum, num_iterations, false, "");
snprintf(description, sizeof(description),
"%-40s - Return from ISR to another thread",
"isr.resume.different.thread.user");
PRINT_STATS_AVG(description, (uint32_t)sum, num_iterations, false, "");
#endif
timing_stop();

View file

@ -93,7 +93,8 @@ int lifo_ops(uint32_t num_iterations, uint32_t options)
{
int priority;
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
priority = k_thread_priority_get(k_current_get());
@ -111,9 +112,11 @@ int lifo_ops(uint32_t num_iterations, uint32_t options)
k_thread_start(&start_thread);
if ((options & K_USER) == 0) {
snprintf(description, sizeof(description),
"LIFO put.immediate.%s",
snprintf(tag, sizeof(tag),
"lifo.put.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Add data to LIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
cycles -= timestamp_overhead_adjustment(options, options);
@ -121,9 +124,11 @@ int lifo_ops(uint32_t num_iterations, uint32_t options)
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"LIFO get.immediate.%s",
snprintf(tag, sizeof(tag),
"lifo.get.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Get data from LIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
cycles -= timestamp_overhead_adjustment(options, options);
PRINT_STATS_AVG(description, (uint32_t)cycles,
@ -131,18 +136,22 @@ int lifo_ops(uint32_t num_iterations, uint32_t options)
k_sem_give(&pause_sem);
}
snprintf(description, sizeof(description),
"LIFO put.alloc.immediate.%s",
snprintf(tag, sizeof(tag),
"lifo.put.alloc.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Allocate to add data to LIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"LIFO get.free.immediate.%s",
snprintf(tag, sizeof(tag),
"lifo.get.free.immediate.%s",
options & K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Free when getting data from LIFO (no ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -258,7 +267,8 @@ int lifo_blocking_ops(uint32_t num_iterations, uint32_t start_options,
{
int priority;
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
priority = k_thread_priority_get(k_current_get());
@ -284,40 +294,48 @@ int lifo_blocking_ops(uint32_t num_iterations, uint32_t start_options,
k_thread_start(&start_thread);
if (((start_options | alt_options) & K_USER) == 0) {
snprintf(tag, sizeof(tag),
"lifo.get.blocking.%s_to_%s",
alt_options & K_USER ? "u" : "k",
start_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"LIFO get.blocking.(%s -> %s)",
alt_options & K_USER ? "U" : "K",
start_options & K_USER ? "U" : "K");
"%-40s - Get data from LIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(tag, sizeof(tag),
"lifo.put.wake+ctx.%s_to_%s",
start_options & K_USER ? "u" : "k",
alt_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"LIFO put.wake+ctx.(%s -> %s)",
start_options & K_USER ? "U" : "K",
alt_options & K_USER ? "U" : "K");
"%-40s - Add data to LIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
}
snprintf(tag, sizeof(tag),
"lifo.get.free.blocking.%s_to_%s",
alt_options & K_USER ? "u" : "k",
start_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"LIFO get.free.blocking.(%s -> %s)",
alt_options & K_USER ? "U" : "K",
start_options & K_USER ? "U" : "K");
"%-40s - Free when getting data from LIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
k_sem_give(&pause_sem);
snprintf(tag, sizeof(tag),
"lifo.put.alloc.wake+ctx.%s_to_%s",
start_options & K_USER ? "u" : "k",
alt_options & K_USER ? "u" : "k");
snprintf(description, sizeof(description),
"LIFO put.alloc.wake+ctx.(%s -> %s)",
start_options & K_USER ? "U" : "K",
alt_options & K_USER ? "U" : "K");
			 "%-40s - Allocate to add data to LIFO (w/ ctx switch)", tag);
cycles = timestamp.cycles;
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");

View file

@ -73,7 +73,8 @@ static void start_lock_unlock(void *p1, void *p2, void *p3)
*/
int mutex_lock_unlock(uint32_t num_iterations, uint32_t options)
{
char description[80];
char tag[50];
char description[120];
int priority;
uint64_t cycles;
@ -93,17 +94,21 @@ int mutex_lock_unlock(uint32_t num_iterations, uint32_t options)
cycles = timestamp.cycles;
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"MUTEX lock.immediate.recursive.%s",
snprintf(tag, sizeof(tag),
"mutex.lock.immediate.recursive.%s",
(options & K_USER) == K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Lock a mutex", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles, num_iterations,
false, "");
cycles = timestamp.cycles;
snprintf(description, sizeof(description),
"MUTEX unlock.immediate.recursive.%s",
snprintf(tag, sizeof(tag),
"mutex.unlock.immediate.recursive.%s",
(options & K_USER) == K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Unlock a mutex", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles, num_iterations,
false, "");

View file

@ -106,7 +106,8 @@ void sema_context_switch(uint32_t num_iterations,
uint32_t start_options, uint32_t alt_options)
{
uint64_t cycles;
char description[80];
char tag[50];
char description[120];
int priority;
timing_start();
@ -138,10 +139,12 @@ void sema_context_switch(uint32_t num_iterations,
cycles = timestamp.cycles;
cycles -= timestamp_overhead_adjustment(start_options, alt_options);
snprintf(tag, sizeof(tag),
"semaphore.take.blocking.%c_to_%c",
((start_options & K_USER) == K_USER) ? 'u' : 'k',
((alt_options & K_USER) == K_USER) ? 'u' : 'k');
snprintf(description, sizeof(description),
"SEMAPHORE take.blocking.(%c -> %c)",
((start_options & K_USER) == K_USER) ? 'U' : 'K',
((alt_options & K_USER) == K_USER) ? 'U' : 'K');
"%-40s - Take a semaphore (context switch)", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -154,10 +157,12 @@ void sema_context_switch(uint32_t num_iterations,
cycles = timestamp.cycles;
cycles -= timestamp_overhead_adjustment(start_options, alt_options);
snprintf(tag, sizeof(tag),
"semaphore.give.wake+ctx.%c_to_%c",
((alt_options & K_USER) == K_USER) ? 'u' : 'k',
((start_options & K_USER) == K_USER) ? 'u' : 'k');
snprintf(description, sizeof(description),
"SEMAPHORE give.wake+ctx.(%c -> %c)",
((alt_options & K_USER) == K_USER) ? 'U' : 'K',
((start_options & K_USER) == K_USER) ? 'U' : 'K');
"%-40s - Give a semaphore (context switch)", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -233,7 +238,8 @@ int sema_test_signal(uint32_t num_iterations, uint32_t options)
{
uint64_t cycles;
int priority;
char description[80];
char tag[50];
char description[120];
timing_start();
@ -254,9 +260,11 @@ int sema_test_signal(uint32_t num_iterations, uint32_t options)
cycles = timestamp.cycles;
snprintf(description, sizeof(description),
"SEMAPHORE give.immediate.%s",
snprintf(tag, sizeof(tag),
"semaphore.give.immediate.%s",
(options & K_USER) == K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Give a semaphore (no waiters)", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -273,9 +281,11 @@ int sema_test_signal(uint32_t num_iterations, uint32_t options)
cycles = timestamp.cycles;
snprintf(description, sizeof(description),
"SEMAPHORE take.immediate.%s",
snprintf(tag, sizeof(tag),
"semaphore.take.immediate.%s",
(options & K_USER) == K_USER ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Take a semaphore (no blocking)", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");

View file

@ -174,7 +174,8 @@ int thread_ops(uint32_t num_iterations, uint32_t start_options, uint32_t alt_opt
int priority;
uint64_t cycles;
uint32_t bit_options = START_ALT;
char description[80];
char tag[50];
char description[120];
priority = k_thread_priority_get(k_current_get());
@ -247,10 +248,12 @@ int thread_ops(uint32_t num_iterations, uint32_t start_options, uint32_t alt_opt
/* Only report stats if <start_thread> created <alt_thread> */
snprintf(description, sizeof(description),
"THREAD create.%s.from.%s",
snprintf(tag, sizeof(tag),
"thread.create.%s.from.%s",
(alt_options & K_USER) != 0 ? "user" : "kernel",
(start_options & K_USER) != 0 ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Create thread", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -260,10 +263,12 @@ int thread_ops(uint32_t num_iterations, uint32_t start_options, uint32_t alt_opt
cycles -= timestamp_overhead_adjustment(start_options, alt_options);
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"THREAD start.%s.from.%s",
snprintf(tag, sizeof(tag),
"thread.start.%s.from.%s",
(alt_options & K_USER) != 0 ? "user" : "kernel",
(start_options & K_USER) != 0 ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Start thread", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -272,10 +277,12 @@ int thread_ops(uint32_t num_iterations, uint32_t start_options, uint32_t alt_opt
cycles -= timestamp_overhead_adjustment(start_options, alt_options);
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"THREAD suspend.%s.from.%s",
snprintf(tag, sizeof(tag),
"thread.suspend.%s.from.%s",
(alt_options & K_USER) != 0 ? "user" : "kernel",
(start_options & K_USER) != 0 ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Suspend thread", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -284,10 +291,12 @@ int thread_ops(uint32_t num_iterations, uint32_t start_options, uint32_t alt_opt
cycles -= timestamp_overhead_adjustment(start_options, alt_options);
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"THREAD resume.%s.from.%s",
snprintf(tag, sizeof(tag),
"thread.resume.%s.from.%s",
(alt_options & K_USER) != 0 ? "user" : "kernel",
(start_options & K_USER) != 0 ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Resume thread", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");
@ -296,10 +305,12 @@ int thread_ops(uint32_t num_iterations, uint32_t start_options, uint32_t alt_opt
cycles -= timestamp_overhead_adjustment(start_options, alt_options);
k_sem_give(&pause_sem);
snprintf(description, sizeof(description),
"THREAD abort.%s.from.%s",
snprintf(tag, sizeof(tag),
"thread.abort.%s.from.%s",
(alt_options & K_USER) != 0 ? "user" : "kernel",
(start_options & K_USER) != 0 ? "user" : "kernel");
snprintf(description, sizeof(description),
"%-40s - Abort thread", tag);
PRINT_STATS_AVG(description, (uint32_t)cycles,
num_iterations, false, "");

View file

@ -96,7 +96,8 @@ static void thread_switch_yield_common(const char *description,
int priority)
{
uint64_t sum;
char summary[80];
char tag[50];
char summary[120];
/* Create the two threads */
@ -130,11 +131,12 @@ static void thread_switch_yield_common(const char *description,
sum -= timestamp_overhead_adjustment(start_options, alt_options);
snprintf(tag, sizeof(tag),
"%s.%c_to_%c", description,
(start_options & K_USER) == K_USER ? 'u' : 'k',
(alt_options & K_USER) == K_USER ? 'u' : 'k');
snprintf(summary, sizeof(summary),
"%s.(%c -> %c)",
description,
(start_options & K_USER) == K_USER ? 'U' : 'K',
(alt_options & K_USER) == K_USER ? 'U' : 'K');
"%-40s - Context switch via k_yield", tag);
PRINT_STATS_AVG(summary, (uint32_t)sum, num_iterations, 0, "");
}
@ -142,13 +144,13 @@ static void thread_switch_yield_common(const char *description,
void thread_switch_yield(uint32_t num_iterations, bool is_cooperative)
{
int priority;
char description[60];
char description[40];
priority = is_cooperative ? K_PRIO_COOP(6)
: k_thread_priority_get(k_current_get()) - 1;
snprintf(description, sizeof(description),
"THREAD yield.%s.ctx",
"thread.yield.%s.ctx",
is_cooperative ? "cooperative" : "preemptive");
/* Kernel -> Kernel */

View file

@ -50,11 +50,11 @@ extern int error_count;
#define TICK_OCCURRENCE_ERROR "Error: Tick Occurred"
#ifdef CSV_FORMAT_OUTPUT
#define FORMAT_STR "%-52s,%s,%s,%s\n"
#define FORMAT_STR "%-94s,%s,%s,%s\n"
#define CYCLE_FORMAT "%8u"
#define NSEC_FORMAT "%8u"
#else
#define FORMAT_STR "%-52s:%s , %s : %s\n"
#define FORMAT_STR "%-94s:%s , %s : %s\n"
#define CYCLE_FORMAT "%8u cycles"
#define NSEC_FORMAT "%8u ns"
#endif