ivc.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654
  1. /*
  2. * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <arch_helpers.h>
  7. #include <assert.h>
  8. #include <common/debug.h>
  9. #include <errno.h>
  10. #include <stdbool.h>
  11. #include <stddef.h>
  12. #include <string.h>
  13. #include "ivc.h"
  14. /*
  15. * IVC channel reset protocol.
  16. *
  17. * Each end uses its tx_channel.state to indicate its synchronization state.
  18. */
enum {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync or
	 * ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in our
	 * rx_channel.
	 */
	ivc_state_established = U(0),

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint is
	 * allowed to clear the counters it owns asynchronously with respect to
	 * the current endpoint. Therefore, the current endpoint is no longer
	 * allowed to communicate.
	 */
	ivc_state_sync = U(1),

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the ack
	 * state. If the remote endpoint observes us in the ack state, it can
	 * return to the established state once it has cleared its counters.
	 */
	ivc_state_ack = U(2)
};
/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx_channel pointer, while the second is only written
 * through the rx_channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct ivc_channel_header {
	struct {
		/* fields owned by the transmitting end */
		uint32_t w_count;	/* number of frames ever written */
		uint32_t state;		/* ivc_state_* reset-protocol state */
		/* pad the TX half out to its own cache line(s) */
		uint32_t w_rsvd[IVC_CHHDR_TX_FIELDS - 2];
	};
	struct {
		/* fields owned by the receiving end */
		uint32_t r_count;	/* number of frames ever consumed */
		/* pad the RX half out to its own cache line(s) */
		uint32_t r_rsvd[IVC_CHHDR_RX_FIELDS - 1];
	};
};
  67. static inline bool ivc_channel_empty(const struct ivc *ivc,
  68. volatile const struct ivc_channel_header *ch)
  69. {
  70. /*
  71. * This function performs multiple checks on the same values with
  72. * security implications, so sample the counters' current values in
  73. * shared memory to ensure that these checks use the same values.
  74. */
  75. uint32_t wr_count = ch->w_count;
  76. uint32_t rd_count = ch->r_count;
  77. bool ret = false;
  78. (void)ivc;
  79. /*
  80. * Perform an over-full check to prevent denial of service attacks where
  81. * a server could be easily fooled into believing that there's an
  82. * extremely large number of frames ready, since receivers are not
  83. * expected to check for full or over-full conditions.
  84. *
  85. * Although the channel isn't empty, this is an invalid case caused by
  86. * a potentially malicious peer, so returning empty is safer, because it
  87. * gives the impression that the channel has gone silent.
  88. */
  89. if (((wr_count - rd_count) > ivc->nframes) || (wr_count == rd_count)) {
  90. ret = true;
  91. }
  92. return ret;
  93. }
  94. static inline bool ivc_channel_full(const struct ivc *ivc,
  95. volatile const struct ivc_channel_header *ch)
  96. {
  97. uint32_t wr_count = ch->w_count;
  98. uint32_t rd_count = ch->r_count;
  99. (void)ivc;
  100. /*
  101. * Invalid cases where the counters indicate that the queue is over
  102. * capacity also appear full.
  103. */
  104. return ((wr_count - rd_count) >= ivc->nframes);
  105. }
  106. static inline uint32_t ivc_channel_avail_count(const struct ivc *ivc,
  107. volatile const struct ivc_channel_header *ch)
  108. {
  109. uint32_t wr_count = ch->w_count;
  110. uint32_t rd_count = ch->r_count;
  111. (void)ivc;
  112. /*
  113. * This function isn't expected to be used in scenarios where an
  114. * over-full situation can lead to denial of service attacks. See the
  115. * comment in ivc_channel_empty() for an explanation about special
  116. * over-full considerations.
  117. */
  118. return (wr_count - rd_count);
  119. }
  120. static inline void ivc_advance_tx(struct ivc *ivc)
  121. {
  122. ivc->tx_channel->w_count++;
  123. if (ivc->w_pos == (ivc->nframes - (uint32_t)1U)) {
  124. ivc->w_pos = 0U;
  125. } else {
  126. ivc->w_pos++;
  127. }
  128. }
  129. static inline void ivc_advance_rx(struct ivc *ivc)
  130. {
  131. ivc->rx_channel->r_count++;
  132. if (ivc->r_pos == (ivc->nframes - (uint32_t)1U)) {
  133. ivc->r_pos = 0U;
  134. } else {
  135. ivc->r_pos++;
  136. }
  137. }
/*
 * Check whether a frame is available to read.
 *
 * Returns 0 when readable, -ECONNRESET when the channel is not in the
 * established state, and -ENOMEM when the RX queue is empty.
 */
static inline int32_t ivc_check_read(const struct ivc *ivc)
{
	/*
	 * tx_channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx_channel->state to ivc_state_ack is not
	 * allowed.
	 */
	if (ivc->tx_channel->state != ivc_state_established) {
		return -ECONNRESET;
	}

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses to
	 * an IVC channel by checking the old queue pointers first.
	 * Synchronization is only necessary when these pointers indicate empty
	 * or full.
	 */
	if (!ivc_channel_empty(ivc, ivc->rx_channel)) {
		return 0;
	}

	/*
	 * Not redundant: the header is accessed through a volatile pointer,
	 * so this second call re-reads the shared counters and may observe a
	 * frame that arrived after the check above.
	 */
	return ivc_channel_empty(ivc, ivc->rx_channel) ? -ENOMEM : 0;
}
/*
 * Check whether a frame slot is available to write.
 *
 * Returns 0 when writable, -ECONNRESET when the channel is not in the
 * established state, and -ENOMEM when the TX queue is full.
 */
static inline int32_t ivc_check_write(const struct ivc *ivc)
{
	if (ivc->tx_channel->state != ivc_state_established) {
		return -ECONNRESET;
	}

	if (!ivc_channel_full(ivc, ivc->tx_channel)) {
		return 0;
	}

	/*
	 * Not redundant: the header is accessed through a volatile pointer,
	 * so this second call re-reads the shared counters and may observe
	 * that the peer freed a slot after the check above.
	 */
	return ivc_channel_full(ivc, ivc->tx_channel) ? -ENOMEM : 0;
}
  172. bool tegra_ivc_can_read(const struct ivc *ivc)
  173. {
  174. return ivc_check_read(ivc) == 0;
  175. }
  176. bool tegra_ivc_can_write(const struct ivc *ivc)
  177. {
  178. return ivc_check_write(ivc) == 0;
  179. }
  180. bool tegra_ivc_tx_empty(const struct ivc *ivc)
  181. {
  182. return ivc_channel_empty(ivc, ivc->tx_channel);
  183. }
  184. static inline uintptr_t calc_frame_offset(uint32_t frame_index,
  185. uint32_t frame_size, uint32_t frame_offset)
  186. {
  187. return ((uintptr_t)frame_index * (uintptr_t)frame_size) +
  188. (uintptr_t)frame_offset;
  189. }
  190. static void *ivc_frame_pointer(const struct ivc *ivc,
  191. volatile const struct ivc_channel_header *ch,
  192. uint32_t frame)
  193. {
  194. assert(frame < ivc->nframes);
  195. return (void *)((uintptr_t)(&ch[1]) +
  196. calc_frame_offset(frame, ivc->frame_size, 0));
  197. }
  198. int32_t tegra_ivc_read(struct ivc *ivc, void *buf, size_t max_read)
  199. {
  200. const void *src;
  201. int32_t result;
  202. if (buf == NULL) {
  203. return -EINVAL;
  204. }
  205. if (max_read > ivc->frame_size) {
  206. return -E2BIG;
  207. }
  208. result = ivc_check_read(ivc);
  209. if (result != 0) {
  210. return result;
  211. }
  212. /*
  213. * Order observation of w_pos potentially indicating new data before
  214. * data read.
  215. */
  216. dmbish();
  217. src = ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
  218. (void)memcpy(buf, src, max_read);
  219. ivc_advance_rx(ivc);
  220. /*
  221. * Ensure our write to r_pos occurs before our read from w_pos.
  222. */
  223. dmbish();
  224. /*
  225. * Notify only upon transition from full to non-full.
  226. * The available count can only asynchronously increase, so the
  227. * worst possible side-effect will be a spurious notification.
  228. */
  229. if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
  230. ivc->notify(ivc);
  231. }
  232. return (int32_t)max_read;
  233. }
  234. /* directly peek at the next frame rx'ed */
  235. void *tegra_ivc_read_get_next_frame(const struct ivc *ivc)
  236. {
  237. if (ivc_check_read(ivc) != 0) {
  238. return NULL;
  239. }
  240. /*
  241. * Order observation of w_pos potentially indicating new data before
  242. * data read.
  243. */
  244. dmbld();
  245. return ivc_frame_pointer(ivc, ivc->rx_channel, ivc->r_pos);
  246. }
/*
 * Release the current RX frame (typically obtained beforehand via
 * tegra_ivc_read_get_next_frame()) back to the transmitter.
 *
 * Returns 0 on success, or the ivc_check_read() error code otherwise.
 */
int32_t tegra_ivc_read_advance(struct ivc *ivc)
{
	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	int32_t result = ivc_check_read(ivc);

	if (result != 0) {
		return result;
	}

	ivc_advance_rx(ivc);

	/*
	 * Ensure our write to r_pos occurs before our read from w_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from full to non-full.
	 * The available count can only asynchronously increase, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->rx_channel) == (ivc->nframes - (uint32_t)1U)) {
		ivc->notify(ivc);
	}

	return 0;
}
/*
 * Copy 'size' bytes from 'buf' into the next free TX frame and publish it.
 *
 * Returns the number of bytes written on success, -EINVAL on NULL
 * arguments, -E2BIG if 'size' exceeds the frame size, -ECONNRESET if the
 * channel is not established, or -ENOMEM if the queue is full.
 */
int32_t tegra_ivc_write(struct ivc *ivc, const void *buf, size_t size)
{
	void *p;
	int32_t result;

	if ((buf == NULL) || (ivc == NULL)) {
		return -EINVAL;
	}

	if (size > ivc->frame_size) {
		return -E2BIG;
	}

	result = ivc_check_write(ivc);
	if (result != 0) {
		return result;
	}

	p = ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);

	/* Zero the whole frame so trailing bytes never carry stale data. */
	(void)memset(p, 0, ivc->frame_size);
	(void)memcpy(p, buf, size);

	/*
	 * Ensure that updated data is visible before the w_pos counter
	 * indicates that it is ready.
	 */
	dmbst();

	ivc_advance_tx(ivc);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from empty to non-empty.
	 * The available count can only asynchronously decrease, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->tx_channel) == 1U) {
		ivc->notify(ivc);
	}

	return (int32_t)size;
}
  310. /* directly poke at the next frame to be tx'ed */
  311. void *tegra_ivc_write_get_next_frame(const struct ivc *ivc)
  312. {
  313. if (ivc_check_write(ivc) != 0) {
  314. return NULL;
  315. }
  316. return ivc_frame_pointer(ivc, ivc->tx_channel, ivc->w_pos);
  317. }
/* advance the tx buffer */
/*
 * Publish the frame that was filled in place (typically obtained via
 * tegra_ivc_write_get_next_frame()).
 *
 * Returns 0 on success, or the ivc_check_write() error code otherwise.
 */
int32_t tegra_ivc_write_advance(struct ivc *ivc)
{
	int32_t result = ivc_check_write(ivc);

	if (result != 0) {
		return result;
	}

	/*
	 * Order any possible stores to the frame before update of w_pos.
	 */
	dmbst();

	ivc_advance_tx(ivc);

	/*
	 * Ensure our write to w_pos occurs before our read from r_pos.
	 */
	dmbish();

	/*
	 * Notify only upon transition from empty to non-empty.
	 * The available count can only asynchronously decrease, so the
	 * worst possible side-effect will be a spurious notification.
	 */
	if (ivc_channel_avail_count(ivc, ivc->tx_channel) == (uint32_t)1U) {
		ivc->notify(ivc);
	}

	return 0;
}
/*
 * Initiate a channel reset by advertising the SYNC state to the remote end
 * and ringing its doorbell. The reset handshake then completes through
 * tegra_ivc_channel_notified() on both sides.
 */
void tegra_ivc_channel_reset(const struct ivc *ivc)
{
	ivc->tx_channel->state = ivc_state_sync;
	ivc->notify(ivc);
}
  349. /*
  350. * ===============================================================
  351. * IVC State Transition Table - see tegra_ivc_channel_notified()
  352. * ===============================================================
  353. *
  354. * local remote action
  355. * ----- ------ -----------------------------------
  356. * SYNC EST <none>
  357. * SYNC ACK reset counters; move to EST; notify
  358. * SYNC SYNC reset counters; move to ACK; notify
  359. * ACK EST move to EST; notify
  360. * ACK ACK move to EST; notify
  361. * ACK SYNC reset counters; move to ACK; notify
  362. * EST EST <none>
  363. * EST ACK <none>
  364. * EST SYNC reset counters; move to ACK; notify
  365. *
  366. * ===============================================================
  367. */
/*
 * Drive one step of the reset handshake; call on every notification from
 * the remote end. Implements the "IVC State Transition Table" above.
 *
 * Returns 0 once the channel is established, -EAGAIN while the handshake
 * is still in progress.
 */
int32_t tegra_ivc_channel_notified(struct ivc *ivc)
{
	uint32_t peer_state;

	/* Copy the receiver's state out of shared memory. */
	peer_state = ivc->rx_channel->state;

	if (peer_state == (uint32_t)ivc_state_sync) {
		/* Rows *, SYNC of the table: reset counters, go to ACK. */
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		dmbld();

		/*
		 * Reset tx_channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0U;
		ivc->rx_channel->r_count = 0U;

		ivc->w_pos = 0U;
		ivc->r_pos = 0U;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		dmbst();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		ivc->tx_channel->state = ivc_state_ack;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else if ((ivc->tx_channel->state == (uint32_t)ivc_state_sync) &&
			(peer_state == (uint32_t)ivc_state_ack)) {
		/* Row SYNC, ACK: peer acknowledged us; go to ESTABLISHED. */
		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		dmbld();

		/*
		 * Reset tx_channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		ivc->tx_channel->w_count = 0U;
		ivc->rx_channel->r_count = 0U;

		ivc->w_pos = 0U;
		ivc->r_pos = 0U;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		dmbst();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else if (ivc->tx_channel->state == (uint32_t)ivc_state_ack) {
		/* Rows ACK, ACK/EST: peer caught up; go to ESTABLISHED. */
		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx_channel.
		 */
		dmbld();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		ivc->tx_channel->state = ivc_state_established;

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc);

	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	return ((ivc->tx_channel->state == (uint32_t)ivc_state_established) ? 0 : -EAGAIN);
}
/*
 * Round 'size' up to the next multiple of IVC_ALIGN (a power of two).
 *
 * NOTE(review): if IVC_ALIGN expands to a 32-bit unsigned constant,
 * ~(IVC_ALIGN - 1U) is a 32-bit mask and the '&' would truncate sizes of
 * 4 GiB or more -- confirm IVC_ALIGN's type in ivc.h if such sizes can occur.
 */
size_t tegra_ivc_align(size_t size)
{
	return (size + (IVC_ALIGN - 1U)) & ~(IVC_ALIGN - 1U);
}
  465. size_t tegra_ivc_total_queue_size(size_t queue_size)
  466. {
  467. if ((queue_size & (IVC_ALIGN - 1U)) != 0U) {
  468. ERROR("queue_size (%d) must be %d-byte aligned\n",
  469. (int32_t)queue_size, IVC_ALIGN);
  470. return 0;
  471. }
  472. return queue_size + sizeof(struct ivc_channel_header);
  473. }
/*
 * Validate channel geometry and placement before the queues are used.
 *
 * Checks counter alignment within the header, total-size overflow, base and
 * frame-size alignment, and that the two queue regions do not overlap.
 * Returns 0 when valid, -EINVAL otherwise.
 *
 * NOTE(review): the overlap check covers only nframes * frame_size, not the
 * channel headers that follow each base -- confirm that is intentional.
 */
static int32_t check_ivc_params(uintptr_t queue_base1, uintptr_t queue_base2,
		uint32_t nframes, uint32_t frame_size)
{
	/* Counters must sit on IVC_ALIGN boundaries for atomic access. */
	assert((offsetof(struct ivc_channel_header, w_count)
				& (IVC_ALIGN - 1U)) == 0U);
	assert((offsetof(struct ivc_channel_header, r_count)
				& (IVC_ALIGN - 1U)) == 0U);
	assert((sizeof(struct ivc_channel_header) & (IVC_ALIGN - 1U)) == 0U);

	/* Reject geometries whose total frame area exceeds 32 bits. */
	if (((uint64_t)nframes * (uint64_t)frame_size) >= 0x100000000ULL) {
		ERROR("nframes * frame_size overflows\n");
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if ((queue_base1 & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("ivc channel start not aligned: %lx\n", queue_base1);
		return -EINVAL;
	}
	if ((queue_base2 & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("ivc channel start not aligned: %lx\n", queue_base2);
		return -EINVAL;
	}

	if ((frame_size & (IVC_ALIGN - 1U)) != 0U) {
		ERROR("frame size not adequately aligned: %u\n",
				frame_size);
		return -EINVAL;
	}

	/*
	 * Reject overlapping queue regions. The 64-bit product cannot
	 * overflow because it was bounded above.
	 */
	if (queue_base1 < queue_base2) {
		if ((queue_base1 + ((uint64_t)frame_size * nframes)) > queue_base2) {
			ERROR("queue regions overlap: %lx + %x, %x\n",
					queue_base1, frame_size,
					frame_size * nframes);
			return -EINVAL;
		}
	} else {
		if ((queue_base2 + ((uint64_t)frame_size * nframes)) > queue_base1) {
			ERROR("queue regions overlap: %lx + %x, %x\n",
					queue_base2, frame_size,
					frame_size * nframes);
			return -EINVAL;
		}
	}

	return 0;
}
  520. int32_t tegra_ivc_init(struct ivc *ivc, uintptr_t rx_base, uintptr_t tx_base,
  521. uint32_t nframes, uint32_t frame_size,
  522. ivc_notify_function notify)
  523. {
  524. int32_t result;
  525. /* sanity check input params */
  526. if ((ivc == NULL) || (notify == NULL)) {
  527. return -EINVAL;
  528. }
  529. result = check_ivc_params(rx_base, tx_base, nframes, frame_size);
  530. if (result != 0) {
  531. return result;
  532. }
  533. /*
  534. * All sizes that can be returned by communication functions should
  535. * fit in a 32-bit integer.
  536. */
  537. if (frame_size > (1u << 31)) {
  538. return -E2BIG;
  539. }
  540. ivc->rx_channel = (struct ivc_channel_header *)rx_base;
  541. ivc->tx_channel = (struct ivc_channel_header *)tx_base;
  542. ivc->notify = notify;
  543. ivc->frame_size = frame_size;
  544. ivc->nframes = nframes;
  545. ivc->w_pos = 0U;
  546. ivc->r_pos = 0U;
  547. INFO("%s: done\n", __func__);
  548. return 0;
  549. }