/// This software is distributed under the terms of the MIT License.
/// Copyright (C) OpenCyphal Development Team <opencyphal.org>
/// Copyright Amazon.com Inc. or its affiliates.
/// SPDX-License-Identifier: MIT

#include <udpard.c> // NOLINT(bugprone-suspicious-include)
#include <unity.h>

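// Including the library translation unit (rather than its header) makes the internal helpers
// (mem_same(), header_deserialize(), tx_predict_frame_count(), and friends) visible here, so
// their guard paths can be exercised directly.
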
// Minimal helpers to avoid heap use in guard paths.
static void free_noop(void* const user, const size_t size, void* const pointer)
{
    (void)user;
    (void)size;
    (void)pointer;
}

// Returns the caller-supplied tag for nonzero sizes, mimicking a successful allocation
// without touching the heap; zero-size requests yield NULL.
static void* alloc_stub(void* const user, const size_t size)
{
    return (size > 0U) ? user : NULL;
}

// An alternative allocator with a distinct function pointer, so mem_same() can detect
// resources that differ only in their allocator.
static void* alloc_alt(void* const user, const size_t size)
{
    (void)size;
    return (byte_t*)user + 1;
}

static udpard_mem_resource_t make_mem(void* const tag)
{
    const udpard_mem_resource_t out = { .user = tag, .free = free_noop, .alloc = alloc_stub };
    return out;
}

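// Callback stubs for the TX and RX vtables exercised below; they accept every call and
// record nothing, which is all the guard paths require.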
static bool eject_stub(udpard_tx_t* const tx, const udpard_tx_ejection_t ejection)
{
    (void)tx;
    (void)ejection;
    return true;
}

static void on_message_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, udpard_rx_transfer_t transfer)
{
    (void)rx;
    (void)port;
    (void)transfer;
}

static void on_collision_stub(udpard_rx_t* const rx, udpard_rx_port_t* const port, const udpard_remote_t remote)
{
    (void)rx;
    (void)port;
    (void)remote;
}

static void test_mem_endpoint_list_guards(void)
{
    // mem_same distinguishes identical resources from those that differ in user tag or allocator.
    const udpard_mem_resource_t mem_a = make_mem((void*)1);
    const udpard_mem_resource_t mem_b = make_mem((void*)2);
    const udpard_mem_resource_t mem_c = { .user = (void*)1, .free = free_noop, .alloc = alloc_alt };
    TEST_ASSERT_TRUE(mem_same(mem_a, mem_a));
    TEST_ASSERT_FALSE(mem_same(mem_a, mem_b));
    TEST_ASSERT_FALSE(mem_same(mem_a, mem_c));

    // Endpoint validation rejects the unspecified address, the broadcast address, and port zero.
    TEST_ASSERT_TRUE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 1U, .port = UDP_PORT }));
    TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 0U, .port = UDP_PORT }));
    TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = UINT32_MAX, .port = UDP_PORT }));
    TEST_ASSERT_FALSE(udpard_is_valid_endpoint((udpard_udpip_ep_t){ .ip = 1U, .port = 0U }));

    // is_listed distinguishes empty and populated lists.
    udpard_list_t list = { 0 };
    udpard_list_member_t member = { 0 };
    TEST_ASSERT_FALSE(is_listed(&list, &member));
    enlist_head(&list, &member);
    TEST_ASSERT_TRUE(is_listed(&list, &member));
}

static void test_fragment_guards(void)
{
    // NULL fragment pointers are handled cleanly and yield NULL.
    TEST_ASSERT_NULL(udpard_fragment_seek(NULL, 0));
    TEST_ASSERT_NULL(udpard_fragment_next(NULL));

    // Offsets past the end yield no data.
    static const byte_t payload[] = { 1U, 2U };
    udpard_fragment_t frag = { .index_offset = { NULL, { NULL, NULL }, 0 },
                               .offset = 4U,
                               .view = { .size = sizeof(payload), .data = payload },
                               .origin = { .size = 0U, .data = NULL },
                               .payload_deleter = { 0 } };
    const udpard_fragment_t* cursor = &frag;
    byte_t out[2] = { 0 };
    TEST_ASSERT_NULL(udpard_fragment_seek(&frag, frag.offset + frag.view.size));
    TEST_ASSERT_EQUAL_UINT(0, udpard_fragment_gather(NULL, 0, 1, out));
    TEST_ASSERT_EQUAL_UINT(0, udpard_fragment_gather(&cursor, frag.offset + frag.view.size, 1, out));
}

static void test_header_guard(void)
{
    // Deserializer rejects missing payload pointers.
    meta_t meta = { 0 };
    udpard_bytes_t payload;
    uint32_t frame_index = 0;
    uint32_t frame_offset = 0;
    uint32_t prefix_crc = 0;
    TEST_ASSERT_FALSE(header_deserialize((udpard_bytes_mut_t){ .size = HEADER_SIZE_BYTES, .data = NULL },
                                         &meta,
                                         &frame_index,
                                         &frame_offset,
                                         &prefix_crc,
                                         &payload));
}

static void test_tx_guards(void)
{
    // Prepare reusable TX resources.
    udpard_tx_mem_resources_t mem = { .transfer = make_mem((void*)11) };
    for (size_t i = 0; i < UDPARD_IFACE_COUNT_MAX; i++) {
        mem.payload[i] = make_mem((void*)(20 + i));
    }
    const udpard_tx_vtable_t vt_ok = { .eject = eject_stub };

    // Reject bad initialization inputs; the last, valid configuration succeeds and is reused below.
    udpard_tx_t tx = { 0 };
    TEST_ASSERT_FALSE(udpard_tx_new(NULL, 1U, 0U, 1U, mem, &vt_ok));
    TEST_ASSERT_FALSE(udpard_tx_new(&tx, 0U, 0U, 1U, mem, &vt_ok));
    udpard_tx_mem_resources_t mem_bad = mem;
    mem_bad.payload[0].alloc = NULL;
    TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem_bad, &vt_ok));
    const udpard_tx_vtable_t vt_bad = { .eject = NULL };
    TEST_ASSERT_FALSE(udpard_tx_new(&tx, 1U, 0U, 1U, mem, &vt_bad));
    TEST_ASSERT_TRUE(udpard_tx_new(&tx, 1U, 0U, 2U, mem, &vt_ok));

    // Push helpers reject invalid timing and null handles.
    const udpard_udpip_ep_t endpoints[UDPARD_IFACE_COUNT_MAX] = { { .ip = 1U, .port = UDP_PORT },
                                                                   { 0U, 0U },
                                                                   { 0U, 0U } };
    const udpard_bytes_scattered_t empty_payload = { .bytes = { .size = 0U, .data = NULL }, .next = NULL };
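    // Assumption: the second and third arguments are the current time and the deadline, so the
    // first push (now 10, deadline 5) fails on timing alone; the other two calls pass NULL handles.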
    TEST_ASSERT_EQUAL_UINT32(
        0, udpard_tx_push(&tx, 10, 5, udpard_prio_fast, 1U, endpoints, 1U, empty_payload, NULL, NULL));
    TEST_ASSERT_EQUAL_UINT32(
        0, udpard_tx_push(NULL, 0, 0, udpard_prio_fast, 1U, endpoints, 1U, empty_payload, NULL, NULL));
    TEST_ASSERT_EQUAL_UINT32(
        0, udpard_tx_push_p2p(NULL, 0, 0, udpard_prio_fast, 1U, 1U, (udpard_remote_t){ 0 }, empty_payload, NULL, NULL));

    // Poll and refcount no-ops on null data.
    udpard_tx_poll(NULL, 0, 0);
    udpard_tx_refcount_inc((udpard_bytes_t){ .size = 0U, .data = NULL });
    udpard_tx_refcount_dec((udpard_bytes_t){ .size = 0U, .data = NULL });
    udpard_tx_free(NULL);
    udpard_tx_free(&tx);
}

static void test_tx_predictor_sharing(void)
{
    // Shared spool suppresses duplicate frame counts.
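    // A 16-byte transfer fits into a single 64-byte-MTU frame per interface. Interfaces 0 and 1
    // share one payload memory resource, so their duplicate frame is presumably pooled and counted
    // once, and interface 2 is disabled by its unspecified endpoint, leaving one frame in total.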
    const udpard_mem_resource_t mem_shared = make_mem((void*)42);
    const udpard_mem_resource_t mem_arr[UDPARD_IFACE_COUNT_MAX] = { mem_shared, mem_shared, make_mem((void*)77) };
    const udpard_udpip_ep_t ep[UDPARD_IFACE_COUNT_MAX] = { { .ip = 1U, .port = UDP_PORT },
                                                           { .ip = 2U, .port = UDP_PORT },
                                                           { 0U, 0U } };
    const size_t mtu[UDPARD_IFACE_COUNT_MAX] = { 64U, 64U, 128U };
    TEST_ASSERT_EQUAL_size_t(1U, tx_predict_frame_count(mtu, mem_arr, ep, 16U));
}

static void test_rx_guards(void)
{
    // RX port creation guards reject invalid parameters; the last, valid configuration succeeds
    // and is reused by the push guard below.
    const udpard_rx_mem_resources_t rx_mem = { .session = make_mem((void*)5), .fragment = make_mem((void*)6) };
    const udpard_rx_port_vtable_t rx_vtb = { .on_message = on_message_stub, .on_collision = on_collision_stub };
    udpard_rx_port_t port;
    TEST_ASSERT_FALSE(udpard_rx_port_new(NULL, 0, 0, 0, rx_mem, &rx_vtb));
    udpard_rx_mem_resources_t bad_rx_mem = rx_mem;
    bad_rx_mem.session.alloc = NULL;
    TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, UDPARD_RX_REORDERING_WINDOW_UNORDERED, bad_rx_mem, &rx_vtb));
    TEST_ASSERT_FALSE(udpard_rx_port_new(&port, 0, 0, (udpard_us_t)-3, rx_mem, &rx_vtb));
    TEST_ASSERT_TRUE(udpard_rx_port_new(&port, 0xAA, 8U, UDPARD_RX_REORDERING_WINDOW_STATELESS, rx_mem, &rx_vtb));

    // Invalid datagram inputs are rejected without processing.
    udpard_rx_t rx;
    udpard_rx_new(&rx, NULL);
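    // Assumption: the trailing argument is the redundant interface index, so passing
    // UDPARD_IFACE_COUNT_MAX (one past the last valid index) is rejected along with the
    // empty datagram and the unspecified source endpoint.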
    TEST_ASSERT_FALSE(udpard_rx_port_push(&rx,
                                          &port,
                                          0,
                                          (udpard_udpip_ep_t){ 0U, 0U },
                                          (udpard_bytes_mut_t){ .size = 0U, .data = NULL },
                                          (udpard_mem_deleter_t){ .user = NULL, .free = NULL },
                                          UDPARD_IFACE_COUNT_MAX));

    // Guard paths for P2P port creation and port freeing.
    udpard_rx_port_p2p_t p2p;
    udpard_rx_port_p2p_vtable_t p2p_vt = { .on_message = NULL };
    TEST_ASSERT_FALSE(udpard_rx_port_new_p2p(&p2p, 1U, 0, rx_mem, &p2p_vt));
    udpard_rx_port_free(NULL, &port);

    // Fragments past extent are discarded early.
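    // Assumption: the two zero arguments below are the transfer extent and the payload size
    // collected so far; a frame at offset 1 lies beyond a zero extent, so the tree update is
    // expected to reject it before allocating anything.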
    udpard_tree_t* root = NULL;
    byte_t buf[1] = { 0 };
    size_t covered = 0;
    const rx_frame_base_t frame = { .offset = 1U,
                                    .payload = { .size = sizeof(buf), .data = buf },
                                    .origin = { .size = sizeof(buf), .data = buf } };
    const udpard_mem_resource_t frag_mem = make_mem((void*)7);
    const udpard_mem_deleter_t deleter = { .user = NULL, .free = free_noop };
    TEST_ASSERT_EQUAL(rx_fragment_tree_rejected,
                      rx_fragment_tree_update(&root, frag_mem, deleter, frame, 0U, 0U, &covered));
}

void setUp(void) {}

void tearDown(void) {}

int main(void)
{
    UNITY_BEGIN();
    RUN_TEST(test_mem_endpoint_list_guards);
    RUN_TEST(test_fragment_guards);
    RUN_TEST(test_header_guard);
    RUN_TEST(test_tx_guards);
    RUN_TEST(test_tx_predictor_sharing);
    RUN_TEST(test_rx_guards);
    return UNITY_END();
}