spmc_shared_mem.c

  1. /*
  2. * Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <inttypes.h>
  9. #include <common/debug.h>
  10. #include <common/runtime_svc.h>
  11. #include <lib/object_pool.h>
  12. #include <lib/spinlock.h>
  13. #include <lib/xlat_tables/xlat_tables_v2.h>
  14. #include <services/ffa_svc.h>
  15. #include "spmc.h"
  16. #include "spmc_shared_mem.h"
  17. #include <platform_def.h>
  18. /**
  19. * struct spmc_shmem_obj - Shared memory object.
  20. * @desc_size: Size of @desc.
  21. * @desc_filled: Size of @desc already received.
  22. * @in_use: Number of clients that have called ffa_mem_retrieve_req
  23. * without a matching ffa_mem_relinquish call.
  24. * @desc: FF-A memory region descriptor passed in ffa_mem_share.
  25. */
  26. struct spmc_shmem_obj {
  27. size_t desc_size;
  28. size_t desc_filled;
  29. size_t in_use;
  30. struct ffa_mtd desc;
  31. };
  32. /*
  33. * Declare our data structure to store the metadata of memory share requests.
  34. * The main datastore is allocated on a per platform basis to ensure enough
  35. * storage can be made available.
  36. * The address of the data store will be populated by the SPMC during its
  37. * initialization.
  38. */
  39. struct spmc_shmem_obj_state spmc_shmem_obj_state = {
  40. /* Set start value for handle so top 32 bits are needed quickly. */
  41. .next_handle = 0xffffffc0U,
  42. };
  43. /**
  44. * spmc_shmem_obj_size - Convert from descriptor size to object size.
  45. * @desc_size: Size of struct ffa_memory_region_descriptor object.
  46. *
  47. * Return: Size of struct spmc_shmem_obj object.
  48. */
  49. static size_t spmc_shmem_obj_size(size_t desc_size)
  50. {
  51. return desc_size + offsetof(struct spmc_shmem_obj, desc);
  52. }
  53. /**
  54. * spmc_shmem_obj_alloc - Allocate struct spmc_shmem_obj.
  55. * @state: Global state.
  56. * @desc_size: Size of struct ffa_memory_region_descriptor object that
  57. * allocated object will hold.
  58. *
  59. * Return: Pointer to newly allocated object, or %NULL if there is not enough
  60. * space left. The returned pointer is only valid while @state is locked; to
  61. * use it again after unlocking @state, spmc_shmem_obj_lookup must be
  62. * called.
  63. */
  64. static struct spmc_shmem_obj *
  65. spmc_shmem_obj_alloc(struct spmc_shmem_obj_state *state, size_t desc_size)
  66. {
  67. struct spmc_shmem_obj *obj;
  68. size_t free = state->data_size - state->allocated;
  69. size_t obj_size;
  70. if (state->data == NULL) {
  71. ERROR("Missing shmem datastore!\n");
  72. return NULL;
  73. }
  74. /* Ensure that descriptor size is aligned */
  75. if (!is_aligned(desc_size, 16)) {
  76. WARN("%s(0x%zx) desc_size not 16-byte aligned\n",
  77. __func__, desc_size);
  78. return NULL;
  79. }
  80. obj_size = spmc_shmem_obj_size(desc_size);
  81. /* Ensure the obj size has not overflowed. */
  82. if (obj_size < desc_size) {
  83. WARN("%s(0x%zx) desc_size overflow\n",
  84. __func__, desc_size);
  85. return NULL;
  86. }
  87. if (obj_size > free) {
  88. WARN("%s(0x%zx) failed, free 0x%zx\n",
  89. __func__, desc_size, free);
  90. return NULL;
  91. }
  92. obj = (struct spmc_shmem_obj *)(state->data + state->allocated);
  93. obj->desc = (struct ffa_mtd) {0};
  94. obj->desc_size = desc_size;
  95. obj->desc_filled = 0;
  96. obj->in_use = 0;
  97. state->allocated += obj_size;
  98. return obj;
  99. }
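/*
 * Example (illustrative sketch): the allocator above must only be called with
 * the global state lock held, and the returned pointer must not be cached
 * across an unlock. A caller typically follows the pattern used by
 * spmc_ffa_mem_send() later in this file:
 *
 *	struct spmc_shmem_obj *obj;
 *
 *	spin_lock(&spmc_shmem_obj_state.lock);
 *	obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
 *	if (obj == NULL) {
 *		spin_unlock(&spmc_shmem_obj_state.lock);
 *		return spmc_ffa_error_return(handle, FFA_ERROR_NO_MEMORY);
 *	}
 *	... fill and validate obj->desc under the lock ...
 *	spin_unlock(&spmc_shmem_obj_state.lock);
 */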
  100. /**
  101. * spmc_shmem_obj_free - Free struct spmc_shmem_obj.
  102. * @state: Global state.
  103. * @obj: Object to free.
  104. *
  105. * Release memory used by @obj. Other objects may move, so on return all
  106. * pointers to struct spmc_shmem_obj object should be considered invalid, not
  107. * just @obj.
  108. *
  109. * The current implementation always compacts the remaining objects to simplify
  110. * the allocator and to avoid fragmentation.
  111. */
  112. static void spmc_shmem_obj_free(struct spmc_shmem_obj_state *state,
  113. struct spmc_shmem_obj *obj)
  114. {
  115. size_t free_size = spmc_shmem_obj_size(obj->desc_size);
  116. uint8_t *shift_dest = (uint8_t *)obj;
  117. uint8_t *shift_src = shift_dest + free_size;
  118. size_t shift_size = state->allocated - (shift_src - state->data);
  119. if (shift_size != 0U) {
  120. memmove(shift_dest, shift_src, shift_size);
  121. }
  122. state->allocated -= free_size;
  123. }
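/*
 * Example (illustrative sketch; stale_obj is a placeholder name): because the
 * pool is compacted on free, any cached object pointer must be refreshed by
 * handle after a free, e.g.:
 *
 *	uint64_t mem_handle = obj->desc.handle;
 *
 *	spmc_shmem_obj_free(&spmc_shmem_obj_state, stale_obj);
 *	obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
 *	if (obj == NULL) {
 *		... obj itself was the object that was freed ...
 *	}
 *
 * spmc_ffa_fill_desc() below uses this pattern when it replaces a v1.0
 * descriptor with its v1.1 conversion.
 */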
  124. /**
  125. * spmc_shmem_obj_lookup - Lookup struct spmc_shmem_obj by handle.
  126. * @state: Global state.
  127. * @handle: Unique handle of object to return.
  128. *
  129. * Return: struct spmc_shmem_obj object with handle matching @handle.
  130. * %NULL, if no object in @state->data has a matching handle.
  131. */
  132. static struct spmc_shmem_obj *
  133. spmc_shmem_obj_lookup(struct spmc_shmem_obj_state *state, uint64_t handle)
  134. {
  135. uint8_t *curr = state->data;
  136. while (curr - state->data < state->allocated) {
  137. struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
  138. if (obj->desc.handle == handle) {
  139. return obj;
  140. }
  141. curr += spmc_shmem_obj_size(obj->desc_size);
  142. }
  143. return NULL;
  144. }
  145. /**
  146. * spmc_shmem_obj_get_next - Get the next memory object from an offset.
  147. * @offset: Offset used to track which objects have previously been
  148. * returned.
  149. *
  150. * Return: the next struct spmc_shmem_obj object from the provided
  151. * offset.
  152. * %NULL, if there are no more objects.
  153. */
  154. static struct spmc_shmem_obj *
  155. spmc_shmem_obj_get_next(struct spmc_shmem_obj_state *state, size_t *offset)
  156. {
  157. uint8_t *curr = state->data + *offset;
  158. if (curr - state->data < state->allocated) {
  159. struct spmc_shmem_obj *obj = (struct spmc_shmem_obj *)curr;
  160. *offset += spmc_shmem_obj_size(obj->desc_size);
  161. return obj;
  162. }
  163. return NULL;
  164. }
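/*
 * Example (illustrative sketch): spmc_shmem_obj_get_next() acts as a simple
 * iterator over the pool while the state lock is held, as
 * spmc_shmem_check_state_obj() does later in this file:
 *
 *	size_t offset = 0;
 *	struct spmc_shmem_obj *cur;
 *
 *	cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state, &offset);
 *	while (cur != NULL) {
 *		... inspect cur ...
 *		cur = spmc_shmem_obj_get_next(&spmc_shmem_obj_state, &offset);
 *	}
 */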
  165. /*******************************************************************************
  166. * FF-A memory descriptor helper functions.
  167. ******************************************************************************/
  168. /**
  169. * spmc_shmem_obj_get_emad - Get the emad from a given index depending on the
  170. * client's FF-A version.
  171. * @desc: The memory transaction descriptor.
  172. * @index: The index of the emad element to be accessed.
  173. * @ffa_version: FF-A version of the provided structure.
  174. * @emad_size: Will be populated with the size of the returned emad
  175. * descriptor.
  176. * Return: A pointer to the requested emad structure.
  177. */
  178. static void *
  179. spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
  180. uint32_t ffa_version, size_t *emad_size)
  181. {
  182. uint8_t *emad;
  183. assert(index < desc->emad_count);
  184. /*
  185. * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
  186. * format, otherwise assume it is a v1.1 format.
  187. */
  188. if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
  189. emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
  190. *emad_size = sizeof(struct ffa_emad_v1_0);
  191. } else {
  192. assert(is_aligned(desc->emad_offset, 16));
  193. emad = ((uint8_t *) desc + desc->emad_offset);
  194. *emad_size = desc->emad_size;
  195. }
  196. assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
  197. return (emad + (*emad_size * index));
  198. }
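/*
 * Example (illustrative sketch): because the emad stride differs between FF-A
 * versions, callers walk the endpoint descriptors using the size returned
 * through @emad_size rather than assuming sizeof(struct ffa_emad_v1_0):
 *
 *	size_t emad_size;
 *	struct ffa_emad_v1_0 *emad;
 *
 *	for (uint32_t i = 0U; i < desc->emad_count; i++) {
 *		emad = spmc_shmem_obj_get_emad(desc, i, ffa_version,
 *					       &emad_size);
 *		... use emad->mapd.endpoint_id, emad->comp_mrd_offset ...
 *	}
 */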
  199. /**
  200. * spmc_shmem_obj_get_comp_mrd - Get comp_mrd from a mtd struct based on the
  201. * FF-A version of the descriptor.
  202. * @obj: Object containing ffa_memory_region_descriptor.
  203. *
  204. * Return: struct ffa_comp_mrd object corresponding to the composite memory
  205. * region descriptor.
  206. */
  207. static struct ffa_comp_mrd *
  208. spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
  209. {
  210. size_t emad_size;
  211. /*
  212. * The comp_mrd_offset field of the emad descriptor remains consistent
  213. * between FF-A versions therefore we can use the v1.0 descriptor here
  214. * in all cases.
  215. */
  216. struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
  217. ffa_version,
  218. &emad_size);
  219. /* Ensure the composite descriptor offset is aligned. */
  220. if (!is_aligned(emad->comp_mrd_offset, 8)) {
  221. WARN("Unaligned composite memory region descriptor offset.\n");
  222. return NULL;
  223. }
  224. return (struct ffa_comp_mrd *)
  225. ((uint8_t *)(&obj->desc) + emad->comp_mrd_offset);
  226. }
  227. /**
  228. * spmc_shmem_obj_validate_id - Validate a partition ID is participating in
  229. * a given memory transaction.
  230. * @sp_id: Partition ID to validate.
  231. * @obj: The shared memory object containing the descriptor
  232. * of the memory transaction.
  233. * Return: true if ID is valid, else false.
  234. */
  235. bool spmc_shmem_obj_validate_id(struct spmc_shmem_obj *obj, uint16_t sp_id)
  236. {
  237. bool found = false;
  238. struct ffa_mtd *desc = &obj->desc;
  239. size_t desc_size = obj->desc_size;
  240. /* Validate the partition is a valid participant. */
  241. for (unsigned int i = 0U; i < desc->emad_count; i++) {
  242. size_t emad_size;
  243. struct ffa_emad_v1_0 *emad;
  244. emad = spmc_shmem_obj_get_emad(desc, i,
  245. MAKE_FFA_VERSION(1, 1),
  246. &emad_size);
  247. /*
  248. * Validate the calculated emad address resides within the
  249. * descriptor.
  250. */
  251. if ((emad == NULL) || (uintptr_t) emad >=
  252. (uintptr_t)((uint8_t *) desc + desc_size)) {
  253. VERBOSE("Invalid emad.\n");
  254. break;
  255. }
  256. if (sp_id == emad->mapd.endpoint_id) {
  257. found = true;
  258. break;
  259. }
  260. }
  261. return found;
  262. }
  263. /*
  264. * Compare two memory regions to determine if any range overlaps with another
  265. * ongoing memory transaction.
  266. */
  267. static bool
  268. overlapping_memory_regions(struct ffa_comp_mrd *region1,
  269. struct ffa_comp_mrd *region2)
  270. {
  271. uint64_t region1_start;
  272. uint64_t region1_size;
  273. uint64_t region1_end;
  274. uint64_t region2_start;
  275. uint64_t region2_size;
  276. uint64_t region2_end;
  277. assert(region1 != NULL);
  278. assert(region2 != NULL);
  279. if (region1 == region2) {
  280. return true;
  281. }
  282. /*
  283. * Check each memory region in the request against existing
  284. * transactions.
  285. */
  286. for (size_t i = 0; i < region1->address_range_count; i++) {
  287. region1_start = region1->address_range_array[i].address;
  288. region1_size =
  289. region1->address_range_array[i].page_count *
  290. PAGE_SIZE_4KB;
  291. region1_end = region1_start + region1_size;
  292. for (size_t j = 0; j < region2->address_range_count; j++) {
  293. region2_start = region2->address_range_array[j].address;
  294. region2_size =
  295. region2->address_range_array[j].page_count *
  296. PAGE_SIZE_4KB;
  297. region2_end = region2_start + region2_size;
  298. /* Check if regions are not overlapping. */
  299. if (!((region2_end <= region1_start) ||
  300. (region1_end <= region2_start))) {
  301. WARN("Overlapping mem regions 0x%lx-0x%lx & 0x%lx-0x%lx\n",
  302. region1_start, region1_end,
  303. region2_start, region2_end);
  304. return true;
  305. }
  306. }
  307. }
  308. return false;
  309. }
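/*
 * Worked example (illustrative values): two adjacent 4K ranges
 * [0x80000000, 0x80001000) and [0x80001000, 0x80002000) do not overlap:
 * region2_end <= region1_start is false, but region1_end <= region2_start
 * is true, so the negated condition above is false and no warning is raised.
 * Shrinking region2_start to 0x80000800 would make both comparisons false
 * and the ranges would be reported as overlapping.
 */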
  310. /*******************************************************************************
  311. * FF-A v1.0 Memory Descriptor Conversion Helpers.
  312. ******************************************************************************/
  313. /**
  314. * spmc_shm_get_v1_1_descriptor_size - Calculate the required size for a v1.1
  315. * converted descriptor.
  316. * @orig: The original v1.0 memory transaction descriptor.
  317. * @desc_size: The size of the original v1.0 memory transaction descriptor.
  318. *
  319. * Return: the size required to store the descriptor in the v1.1 format.
  320. */
  321. static uint64_t
  322. spmc_shm_get_v1_1_descriptor_size(struct ffa_mtd_v1_0 *orig, size_t desc_size)
  323. {
  324. uint64_t size = 0;
  325. struct ffa_comp_mrd *mrd;
  326. struct ffa_emad_v1_0 *emad_array = orig->emad;
  327. /* Get the size of the v1.1 descriptor. */
  328. size += sizeof(struct ffa_mtd);
  329. /* Add the size of the emad descriptors. */
  330. size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
  331. /* Add the size of the composite mrds. */
  332. size += sizeof(struct ffa_comp_mrd);
  333. /* Add the size of the constituent mrds. */
  334. mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
  335. emad_array[0].comp_mrd_offset);
  336. /* Add the size of the memory region descriptors. */
  337. size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
  338. return size;
  339. }
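/*
 * Layout sketch (illustrative): the size computed above corresponds to the
 * converted v1.1 descriptor being laid out as
 *
 *	struct ffa_mtd
 *	struct ffa_emad_v1_0	[emad_count]
 *	struct ffa_comp_mrd
 *	struct ffa_cons_mrd	[address_range_count]
 *
 * which matches how spmc_shm_convert_shmem_obj_from_v1_0() places the emad
 * array directly after the ffa_mtd and the composite descriptor after the
 * emad array.
 */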
  340. /**
  341. * spmc_shm_get_v1_0_descriptor_size - Calculate the required size for a v1.0
  342. * converted descriptor.
  343. * @orig: The original v1.1 memory transaction descriptor.
  344. * @desc_size: The size of the original v1.1 memory transaction descriptor.
  345. *
  346. * Return: the size required to store the descriptor in the v1.0 format.
  347. */
  348. static size_t
  349. spmc_shm_get_v1_0_descriptor_size(struct ffa_mtd *orig, size_t desc_size)
  350. {
  351. size_t size = 0;
  352. struct ffa_comp_mrd *mrd;
  353. struct ffa_emad_v1_0 *emad_array = (struct ffa_emad_v1_0 *)
  354. ((uint8_t *) orig +
  355. orig->emad_offset);
  356. /* Get the size of the v1.0 descriptor. */
  357. size += sizeof(struct ffa_mtd_v1_0);
  358. /* Add the size of the v1.0 emad descriptors. */
  359. size += orig->emad_count * sizeof(struct ffa_emad_v1_0);
  360. /* Add the size of the composite mrds. */
  361. size += sizeof(struct ffa_comp_mrd);
  362. /* Add the size of the constituent mrds. */
  363. mrd = (struct ffa_comp_mrd *) ((uint8_t *) orig +
  364. emad_array[0].comp_mrd_offset);
  365. /* Check the calculated address is within the memory descriptor. */
  366. if (((uintptr_t) mrd + sizeof(struct ffa_comp_mrd)) >
  367. (uintptr_t)((uint8_t *) orig + desc_size)) {
  368. return 0;
  369. }
  370. size += mrd->address_range_count * sizeof(struct ffa_cons_mrd);
  371. return size;
  372. }
  373. /**
  374. * spmc_shm_convert_shmem_obj_from_v1_0 - Converts a given v1.0 memory object.
  375. * @out_obj: The shared memory object to populate the converted descriptor.
  376. * @orig: The shared memory object containing the v1.0 descriptor.
  377. *
  378. * Return: true if the conversion is successful else false.
  379. */
  380. static bool
  381. spmc_shm_convert_shmem_obj_from_v1_0(struct spmc_shmem_obj *out_obj,
  382. struct spmc_shmem_obj *orig)
  383. {
  384. struct ffa_mtd_v1_0 *mtd_orig = (struct ffa_mtd_v1_0 *) &orig->desc;
  385. struct ffa_mtd *out = &out_obj->desc;
  386. struct ffa_emad_v1_0 *emad_array_in;
  387. struct ffa_emad_v1_0 *emad_array_out;
  388. struct ffa_comp_mrd *mrd_in;
  389. struct ffa_comp_mrd *mrd_out;
  390. size_t mrd_in_offset;
  391. size_t mrd_out_offset;
  392. size_t mrd_size = 0;
  393. /* Populate the new descriptor format from the v1.0 struct. */
  394. out->sender_id = mtd_orig->sender_id;
  395. out->memory_region_attributes = mtd_orig->memory_region_attributes;
  396. out->flags = mtd_orig->flags;
  397. out->handle = mtd_orig->handle;
  398. out->tag = mtd_orig->tag;
  399. out->emad_count = mtd_orig->emad_count;
  400. out->emad_size = sizeof(struct ffa_emad_v1_0);
  401. /*
  402. * We will locate the emad descriptors directly after the ffa_mtd
  403. * struct. This will be 8-byte aligned.
  404. */
  405. out->emad_offset = sizeof(struct ffa_mtd);
  406. emad_array_in = mtd_orig->emad;
  407. emad_array_out = (struct ffa_emad_v1_0 *)
  408. ((uint8_t *) out + out->emad_offset);
  409. /* Copy across the emad structs. */
  410. for (unsigned int i = 0U; i < out->emad_count; i++) {
  411. /* Bound check for emad array. */
  412. if (((uint8_t *)emad_array_in + sizeof(struct ffa_emad_v1_0)) >
  413. ((uint8_t *) mtd_orig + orig->desc_size)) {
  414. VERBOSE("%s: Invalid mtd structure.\n", __func__);
  415. return false;
  416. }
  417. memcpy(&emad_array_out[i], &emad_array_in[i],
  418. sizeof(struct ffa_emad_v1_0));
  419. }
  420. /* Place the mrd descriptors after the end of the emad descriptors.*/
  421. mrd_in_offset = emad_array_in->comp_mrd_offset;
  422. mrd_out_offset = out->emad_offset + (out->emad_size * out->emad_count);
  423. mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
  424. /* Add the size of the composite memory region descriptor. */
  425. mrd_size += sizeof(struct ffa_comp_mrd);
  426. /* Find the mrd descriptor. */
  427. mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
  428. /* Add the size of the constituent memory region descriptors. */
  429. mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
  430. /*
  431. * Update the offset in the emads by the delta between the input and
  432. * output addresses.
  433. */
  434. for (unsigned int i = 0U; i < out->emad_count; i++) {
  435. emad_array_out[i].comp_mrd_offset =
  436. emad_array_in[i].comp_mrd_offset +
  437. (mrd_out_offset - mrd_in_offset);
  438. }
  439. /* Verify that we stay within the bounds of the memory descriptors. */
  440. if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
  441. (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
  442. ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
  443. (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
  444. ERROR("%s: Invalid mrd structure.\n", __func__);
  445. return false;
  446. }
  447. /* Copy the mrd descriptors directly. */
  448. memcpy(mrd_out, mrd_in, mrd_size);
  449. return true;
  450. }
  451. /**
  452. * spmc_shm_convert_mtd_to_v1_0 - Converts a given v1.1 memory object to
  453. * v1.0 memory object.
  454. * @out_obj: The shared memory object to populate the v1.0 descriptor.
  455. * @orig: The shared memory object containing the v1.1 descriptor.
  456. *
  457. * Return: true if the conversion is successful else false.
  458. */
  459. static bool
  460. spmc_shm_convert_mtd_to_v1_0(struct spmc_shmem_obj *out_obj,
  461. struct spmc_shmem_obj *orig)
  462. {
  463. struct ffa_mtd *mtd_orig = &orig->desc;
  464. struct ffa_mtd_v1_0 *out = (struct ffa_mtd_v1_0 *) &out_obj->desc;
  465. struct ffa_emad_v1_0 *emad_in;
  466. struct ffa_emad_v1_0 *emad_array_in;
  467. struct ffa_emad_v1_0 *emad_array_out;
  468. struct ffa_comp_mrd *mrd_in;
  469. struct ffa_comp_mrd *mrd_out;
  470. size_t mrd_in_offset;
  471. size_t mrd_out_offset;
  472. size_t emad_out_array_size;
  473. size_t mrd_size = 0;
  474. size_t orig_desc_size = orig->desc_size;
  475. /* Populate the v1.0 descriptor format from the v1.1 struct. */
  476. out->sender_id = mtd_orig->sender_id;
  477. out->memory_region_attributes = mtd_orig->memory_region_attributes;
  478. out->flags = mtd_orig->flags;
  479. out->handle = mtd_orig->handle;
  480. out->tag = mtd_orig->tag;
  481. out->emad_count = mtd_orig->emad_count;
  482. /* Determine the location of the emad array in both descriptors. */
  483. emad_array_in = (struct ffa_emad_v1_0 *)
  484. ((uint8_t *) mtd_orig + mtd_orig->emad_offset);
  485. emad_array_out = out->emad;
  486. /* Copy across the emad structs. */
  487. emad_in = emad_array_in;
  488. for (unsigned int i = 0U; i < out->emad_count; i++) {
  489. /* Bound check for emad array. */
  490. if (((uint8_t *)emad_in + sizeof(struct ffa_emad_v1_0)) >
  491. ((uint8_t *) mtd_orig + orig_desc_size)) {
  492. VERBOSE("%s: Invalid mtd structure.\n", __func__);
  493. return false;
  494. }
  495. memcpy(&emad_array_out[i], emad_in,
  496. sizeof(struct ffa_emad_v1_0));
  497. emad_in += mtd_orig->emad_size;
  498. }
  499. /* Place the mrd descriptors after the end of the emad descriptors. */
  500. emad_out_array_size = sizeof(struct ffa_emad_v1_0) * out->emad_count;
  501. mrd_out_offset = (uint8_t *) out->emad - (uint8_t *) out +
  502. emad_out_array_size;
  503. mrd_out = (struct ffa_comp_mrd *) ((uint8_t *) out + mrd_out_offset);
  504. mrd_in_offset = mtd_orig->emad_offset +
  505. (mtd_orig->emad_size * mtd_orig->emad_count);
  506. /* Add the size of the composite memory region descriptor. */
  507. mrd_size += sizeof(struct ffa_comp_mrd);
  508. /* Find the mrd descriptor. */
  509. mrd_in = (struct ffa_comp_mrd *) ((uint8_t *) mtd_orig + mrd_in_offset);
  510. /* Add the size of the constituent memory region descriptors. */
  511. mrd_size += mrd_in->address_range_count * sizeof(struct ffa_cons_mrd);
  512. /*
  513. * Update the offset in the emads by the delta between the input and
  514. * output addresses.
  515. */
  516. emad_in = emad_array_in;
  517. for (unsigned int i = 0U; i < out->emad_count; i++) {
  518. emad_array_out[i].comp_mrd_offset = emad_in->comp_mrd_offset +
  519. (mrd_out_offset -
  520. mrd_in_offset);
  521. emad_in += mtd_orig->emad_size;
  522. }
  523. /* Verify that we stay within the bounds of the memory descriptors. */
  524. if ((uintptr_t)((uint8_t *) mrd_in + mrd_size) >
  525. (uintptr_t)((uint8_t *) mtd_orig + orig->desc_size) ||
  526. ((uintptr_t)((uint8_t *) mrd_out + mrd_size) >
  527. (uintptr_t)((uint8_t *) out + out_obj->desc_size))) {
  528. ERROR("%s: Invalid mrd structure.\n", __func__);
  529. return false;
  530. }
  531. /* Copy the mrd descriptors directly. */
  532. memcpy(mrd_out, mrd_in, mrd_size);
  533. return true;
  534. }
  535. /**
  536. * spmc_populate_ffa_v1_0_descriptor - Converts a given v1.1 memory object to
  537. * the v1.0 format and populates the
  538. * provided buffer.
  539. * @dst: Buffer to populate v1.0 ffa_memory_region_descriptor.
  540. * @orig_obj: Object containing v1.1 ffa_memory_region_descriptor.
  541. * @buf_size: Size of the buffer to populate.
  542. * @offset: The offset of the converted descriptor to copy.
  543. * @copy_size: Will be populated with the number of bytes copied.
  544. * @v1_0_desc_size: Will be populated with the total size of the v1.0
  545. * descriptor.
  546. *
  547. * Return: 0 if conversion and population succeeded.
  548. * Note: This function invalidates the reference to @orig_obj, therefore
  549. * `spmc_shmem_obj_lookup` must be called if further usage is required.
  550. */
  551. static uint32_t
  552. spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
  553. size_t buf_size, size_t offset,
  554. size_t *copy_size, size_t *v1_0_desc_size)
  555. {
  556. struct spmc_shmem_obj *v1_0_obj;
  557. /* Calculate the size that the v1.0 descriptor will require. */
  558. *v1_0_desc_size = spmc_shm_get_v1_0_descriptor_size(
  559. &orig_obj->desc, orig_obj->desc_size);
  560. if (*v1_0_desc_size == 0) {
  561. ERROR("%s: cannot determine size of descriptor.\n",
  562. __func__);
  563. return FFA_ERROR_INVALID_PARAMETER;
  564. }
  565. /* Get a new obj to store the v1.0 descriptor. */
  566. v1_0_obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state,
  567. *v1_0_desc_size);
  568. if (!v1_0_obj) {
  569. return FFA_ERROR_NO_MEMORY;
  570. }
  571. /* Perform the conversion from v1.1 to v1.0. */
  572. if (!spmc_shm_convert_mtd_to_v1_0(v1_0_obj, orig_obj)) {
  573. spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
  574. return FFA_ERROR_INVALID_PARAMETER;
  575. }
  576. *copy_size = MIN(v1_0_obj->desc_size - offset, buf_size);
  577. memcpy(dst, (uint8_t *) &v1_0_obj->desc + offset, *copy_size);
  578. /*
  579. * We're finished with the v1.0 descriptor for now so free it.
  580. * Note that this will invalidate any references to the v1.1
  581. * descriptor.
  582. */
  583. spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_0_obj);
  584. return 0;
  585. }
  586. static int
  587. spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
  588. size_t fragment_length, size_t total_length)
  589. {
  590. unsigned long long emad_end;
  591. unsigned long long emad_size;
  592. unsigned long long emad_offset;
  593. unsigned int min_desc_size;
  594. /* Determine the appropriate minimum descriptor size. */
  595. if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
  596. min_desc_size = sizeof(struct ffa_mtd_v1_0);
  597. } else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
  598. min_desc_size = sizeof(struct ffa_mtd);
  599. } else {
  600. return FFA_ERROR_INVALID_PARAMETER;
  601. }
  602. if (fragment_length < min_desc_size) {
  603. WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
  604. min_desc_size);
  605. return FFA_ERROR_INVALID_PARAMETER;
  606. }
  607. if (desc->emad_count == 0U) {
  608. WARN("%s: unsupported attribute desc count %u.\n",
  609. __func__, desc->emad_count);
  610. return FFA_ERROR_INVALID_PARAMETER;
  611. }
  612. /*
  613. * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
  614. * format, otherwise assume it is a v1.1 format.
  615. */
  616. if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
  617. emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
  618. } else {
  619. if (!is_aligned(desc->emad_offset, 16)) {
  620. WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
  621. __func__, desc->emad_offset);
  622. return FFA_ERROR_INVALID_PARAMETER;
  623. }
  624. if (desc->emad_offset < sizeof(struct ffa_mtd)) {
  625. WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
  626. __func__, desc->emad_offset,
  627. sizeof(struct ffa_mtd));
  628. return FFA_ERROR_INVALID_PARAMETER;
  629. }
  630. emad_offset = desc->emad_offset;
  631. if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
  632. WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
  633. desc->emad_size, sizeof(struct ffa_emad_v1_0));
  634. return FFA_ERROR_INVALID_PARAMETER;
  635. }
  636. if (!is_aligned(desc->emad_size, 16)) {
  637. WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
  638. __func__, desc->emad_size);
  639. return FFA_ERROR_INVALID_PARAMETER;
  640. }
  641. emad_size = desc->emad_size;
  642. }
  643. /*
  644. * Overflow is impossible: the arithmetic happens in at least 64-bit
  645. * precision, but all of the operands are bounded by UINT32_MAX, and
  646. * ((2^32 - 1) * (2^32 - 1) + (2^32 - 1) + (2^32 - 1))
  647. * = ((2^32 - 1) * ((2^32 - 1) + 1 + 1))
  648. * = ((2^32 - 1) * (2^32 + 1))
  649. * = (2^64 - 1).
  650. */
  651. CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
  652. emad_end = (desc->emad_count * (unsigned long long)emad_size) +
  653. (unsigned long long)sizeof(struct ffa_comp_mrd) +
  654. (unsigned long long)emad_offset;
  655. if (emad_end > total_length) {
  656. WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
  657. __func__, emad_end, total_length);
  658. return FFA_ERROR_INVALID_PARAMETER;
  659. }
  660. return 0;
  661. }
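/*
 * Worked example (illustrative values): for a v1.1 descriptor with
 * emad_offset = sizeof(struct ffa_mtd), emad_size = 16 and emad_count = 2,
 * the check above requires
 *
 *	emad_end = (2 * 16) + sizeof(struct ffa_comp_mrd) + emad_offset
 *
 * bytes to fit within total_length, i.e. all endpoint descriptors plus at
 * least the composite descriptor header must lie inside the transmitted
 * transaction descriptor.
 */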
  662. static inline const struct ffa_emad_v1_0 *
  663. emad_advance(const struct ffa_emad_v1_0 *emad, size_t offset)
  664. {
  665. return (const struct ffa_emad_v1_0 *)((const uint8_t *)emad + offset);
  666. }
  667. /**
  668. * spmc_shmem_check_obj - Check that counts in descriptor match overall size.
  669. * @obj: Object containing ffa_memory_region_descriptor.
  670. * @ffa_version: FF-A version of the provided descriptor.
  671. *
  672. * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if
  673. * constituent_memory_region_descriptor offset or count is invalid.
  674. */
  675. static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
  676. uint32_t ffa_version)
  677. {
  678. unsigned long long total_page_count;
  679. const struct ffa_emad_v1_0 *first_emad;
  680. const struct ffa_emad_v1_0 *end_emad;
  681. size_t emad_size;
  682. uint32_t comp_mrd_offset;
  683. size_t header_emad_size;
  684. size_t size;
  685. size_t count;
  686. size_t expected_size;
  687. const struct ffa_comp_mrd *comp;
  688. if (obj->desc_filled != obj->desc_size) {
  689. ERROR("BUG: %s called on incomplete object (%zu != %zu)\n",
  690. __func__, obj->desc_filled, obj->desc_size);
  691. panic();
  692. }
  693. if (spmc_validate_mtd_start(&obj->desc, ffa_version,
  694. obj->desc_filled, obj->desc_size)) {
  695. ERROR("BUG: %s called on object with corrupt memory region descriptor\n",
  696. __func__);
  697. panic();
  698. }
  699. first_emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
  700. ffa_version, &emad_size);
  701. end_emad = emad_advance(first_emad, obj->desc.emad_count * emad_size);
  702. comp_mrd_offset = first_emad->comp_mrd_offset;
  703. /* Loop through the endpoint descriptors, validating each of them. */
  704. for (const struct ffa_emad_v1_0 *emad = first_emad; emad < end_emad;) {
  705. ffa_endpoint_id16_t ep_id;
  706. /*
  707. * If a partition ID resides in the secure world validate that
  708. * the partition ID is for a known partition. Ignore any
  709. * partition ID belonging to the normal world as it is assumed
  710. * the Hypervisor will have validated these.
  711. */
  712. ep_id = emad->mapd.endpoint_id;
  713. if (ffa_is_secure_world_id(ep_id)) {
  714. if (spmc_get_sp_ctx(ep_id) == NULL) {
  715. WARN("%s: Invalid receiver id 0x%x\n",
  716. __func__, ep_id);
  717. return FFA_ERROR_INVALID_PARAMETER;
  718. }
  719. }
  720. /*
  721. * The offset provided to the composite memory region descriptor
  722. * should be consistent across endpoint descriptors.
  723. */
  724. if (comp_mrd_offset != emad->comp_mrd_offset) {
  725. ERROR("%s: mismatching offsets provided, %u != %u\n",
  726. __func__, emad->comp_mrd_offset, comp_mrd_offset);
  727. return FFA_ERROR_INVALID_PARAMETER;
  728. }
  729. /* Advance to the next endpoint descriptor */
  730. emad = emad_advance(emad, emad_size);
  731. /*
  732. * Ensure neither this emad nor any subsequent emads have
  733. * the same partition ID as the previous emad.
  734. */
  735. for (const struct ffa_emad_v1_0 *other_emad = emad;
  736. other_emad < end_emad;
  737. other_emad = emad_advance(other_emad, emad_size)) {
  738. if (ep_id == other_emad->mapd.endpoint_id) {
  739. WARN("%s: Duplicated endpoint id 0x%x\n",
  740. __func__, other_emad->mapd.endpoint_id);
  741. return FFA_ERROR_INVALID_PARAMETER;
  742. }
  743. }
  744. }
  745. header_emad_size = (size_t)((const uint8_t *)end_emad -
  746. (const uint8_t *)&obj->desc);
  747. /*
  748. * Check that the composite descriptor
  749. * is after the endpoint descriptors.
  750. */
  751. if (comp_mrd_offset < header_emad_size) {
  752. WARN("%s: invalid object, offset %u < header + emad %zu\n",
  753. __func__, comp_mrd_offset, header_emad_size);
  754. return FFA_ERROR_INVALID_PARAMETER;
  755. }
  756. /* Ensure the composite descriptor offset is aligned. */
  757. if (!is_aligned(comp_mrd_offset, 16)) {
  758. WARN("%s: invalid object, unaligned composite memory "
  759. "region descriptor offset %u.\n",
  760. __func__, comp_mrd_offset);
  761. return FFA_ERROR_INVALID_PARAMETER;
  762. }
  763. size = obj->desc_size;
  764. /* Check that the composite descriptor is in bounds. */
  765. if (comp_mrd_offset > size) {
  766. WARN("%s: invalid object, offset %u > total size %zu\n",
  767. __func__, comp_mrd_offset, obj->desc_size);
  768. return FFA_ERROR_INVALID_PARAMETER;
  769. }
  770. size -= comp_mrd_offset;
  771. /* Check that there is enough space for the composite descriptor. */
  772. if (size < sizeof(struct ffa_comp_mrd)) {
  773. WARN("%s: invalid object, offset %u, total size %zu, no header space.\n",
  774. __func__, comp_mrd_offset, obj->desc_size);
  775. return FFA_ERROR_INVALID_PARAMETER;
  776. }
  777. size -= sizeof(*comp);
  778. count = size / sizeof(struct ffa_cons_mrd);
  779. comp = (const struct ffa_comp_mrd *)
  780. ((const uint8_t *)(&obj->desc) + comp_mrd_offset);
  781. if (comp->address_range_count != count) {
  782. WARN("%s: invalid object, desc count %u != %zu\n",
  783. __func__, comp->address_range_count, count);
  784. return FFA_ERROR_INVALID_PARAMETER;
  785. }
  786. /* Ensure that the expected and actual sizes are equal. */
  787. expected_size = comp_mrd_offset + sizeof(*comp) +
  788. count * sizeof(struct ffa_cons_mrd);
  789. if (expected_size != obj->desc_size) {
  790. WARN("%s: invalid object, computed size %zu != size %zu\n",
  791. __func__, expected_size, obj->desc_size);
  792. return FFA_ERROR_INVALID_PARAMETER;
  793. }
  794. total_page_count = 0;
  795. /*
  796. * comp->address_range_count is 32-bit, so 'count' must fit in a
  797. * uint32_t at this point.
  798. */
  799. for (size_t i = 0; i < count; i++) {
  800. const struct ffa_cons_mrd *mrd = comp->address_range_array + i;
  801. if (!is_aligned(mrd->address, PAGE_SIZE)) {
  802. WARN("%s: invalid object, address in region descriptor "
  803. "%zu not 4K aligned (got 0x%016llx)",
  804. __func__, i, (unsigned long long)mrd->address);
  805. }
  806. /*
  807. * No overflow possible: total_page_count can hold at
  808. * least 2^64 - 1, but will have at most 2^32 - 1
  809. * values added to it, each of which cannot exceed 2^32 - 1.
  810. */
  811. total_page_count += mrd->page_count;
  812. }
  813. if (comp->total_page_count != total_page_count) {
  814. WARN("%s: invalid object, desc total_page_count %u != %llu\n",
  815. __func__, comp->total_page_count, total_page_count);
  816. return FFA_ERROR_INVALID_PARAMETER;
  817. }
  818. return 0;
  819. }
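/*
 * Worked example (illustrative values): with comp_mrd_offset = 0x40 and two
 * constituent ranges, the object only passes the final size check above if
 *
 *	obj->desc_size == 0x40 + sizeof(struct ffa_comp_mrd) +
 *			  2 * sizeof(struct ffa_cons_mrd)
 *
 * i.e. the descriptor ends exactly at the last constituent memory region
 * descriptor, with no trailing bytes.
 */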
  820. /**
  821. * spmc_shmem_check_state_obj - Check if the descriptor describes memory
  822. * regions that are currently involved with an
  823. * existing memory transaction. This implies that
  824. * the memory is not in a valid state for lending.
  825. * @obj: Object containing ffa_memory_region_descriptor.
  826. *
  827. * Return: 0 if object is valid, FFA_ERROR_INVALID_PARAMETER if invalid memory
  828. * state.
  829. */
  830. static int spmc_shmem_check_state_obj(struct spmc_shmem_obj *obj,
  831. uint32_t ffa_version)
  832. {
  833. size_t obj_offset = 0;
  834. struct spmc_shmem_obj *inflight_obj;
  835. struct ffa_comp_mrd *other_mrd;
  836. struct ffa_comp_mrd *requested_mrd = spmc_shmem_obj_get_comp_mrd(obj,
  837. ffa_version);
  838. if (requested_mrd == NULL) {
  839. return FFA_ERROR_INVALID_PARAMETER;
  840. }
  841. inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
  842. &obj_offset);
  843. while (inflight_obj != NULL) {
  844. /*
  845. * Don't compare the transaction to itself or to partially
  846. * transmitted descriptors.
  847. */
  848. if ((obj->desc.handle != inflight_obj->desc.handle) &&
  849. (obj->desc_size == obj->desc_filled)) {
  850. other_mrd = spmc_shmem_obj_get_comp_mrd(inflight_obj,
  851. FFA_VERSION_COMPILED);
  852. if (other_mrd == NULL) {
  853. return FFA_ERROR_INVALID_PARAMETER;
  854. }
  855. if (overlapping_memory_regions(requested_mrd,
  856. other_mrd)) {
  857. return FFA_ERROR_INVALID_PARAMETER;
  858. }
  859. }
  860. inflight_obj = spmc_shmem_obj_get_next(&spmc_shmem_obj_state,
  861. &obj_offset);
  862. }
  863. return 0;
  864. }
  865. static long spmc_ffa_fill_desc(struct mailbox *mbox,
  866. struct spmc_shmem_obj *obj,
  867. uint32_t fragment_length,
  868. ffa_mtd_flag32_t mtd_flag,
  869. uint32_t ffa_version,
  870. void *smc_handle)
  871. {
  872. int ret;
  873. uint32_t handle_low;
  874. uint32_t handle_high;
  875. if (mbox->rxtx_page_count == 0U) {
  876. WARN("%s: buffer pair not registered.\n", __func__);
  877. ret = FFA_ERROR_INVALID_PARAMETER;
  878. goto err_arg;
  879. }
  880. CASSERT(sizeof(mbox->rxtx_page_count) == 4, assert_bogus_page_count);
  881. if (fragment_length > (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB) {
  882. WARN("%s: bad fragment size %u > %" PRIu64 " buffer size\n", __func__,
  883. fragment_length, (uint64_t)mbox->rxtx_page_count * PAGE_SIZE_4KB);
  884. ret = FFA_ERROR_INVALID_PARAMETER;
  885. goto err_arg;
  886. }
  887. if (fragment_length > obj->desc_size - obj->desc_filled) {
  888. WARN("%s: bad fragment size %u > %zu remaining\n", __func__,
  889. fragment_length, obj->desc_size - obj->desc_filled);
  890. ret = FFA_ERROR_INVALID_PARAMETER;
  891. goto err_arg;
  892. }
  893. memcpy((uint8_t *)&obj->desc + obj->desc_filled,
  894. (uint8_t *) mbox->tx_buffer, fragment_length);
  895. /* Ensure that the sender ID resides in the normal world. */
  896. if (ffa_is_secure_world_id(obj->desc.sender_id)) {
  897. WARN("%s: Invalid sender ID 0x%x.\n",
  898. __func__, obj->desc.sender_id);
  899. ret = FFA_ERROR_DENIED;
  900. goto err_arg;
  901. }
  902. /* Ensure the NS bit is set to 0. */
  903. if ((obj->desc.memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
  904. WARN("%s: NS mem attributes flags MBZ.\n", __func__);
  905. ret = FFA_ERROR_INVALID_PARAMETER;
  906. goto err_arg;
  907. }
  908. /*
  909. * We don't currently support any optional flags so ensure none are
  910. * requested.
  911. */
  912. if (obj->desc.flags != 0U && mtd_flag != 0U &&
  913. (obj->desc.flags != mtd_flag)) {
  914. WARN("%s: invalid memory transaction flags %u != %u\n",
  915. __func__, obj->desc.flags, mtd_flag);
  916. ret = FFA_ERROR_INVALID_PARAMETER;
  917. goto err_arg;
  918. }
  919. if (obj->desc_filled == 0U) {
  920. /* First fragment, descriptor header has been copied */
  921. ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
  922. fragment_length, obj->desc_size);
  923. if (ret != 0) {
  924. goto err_bad_desc;
  925. }
  926. obj->desc.handle = spmc_shmem_obj_state.next_handle++;
  927. obj->desc.flags |= mtd_flag;
  928. }
  929. obj->desc_filled += fragment_length;
  930. handle_low = (uint32_t)obj->desc.handle;
  931. handle_high = obj->desc.handle >> 32;
  932. if (obj->desc_filled != obj->desc_size) {
  933. SMC_RET8(smc_handle, FFA_MEM_FRAG_RX, handle_low,
  934. handle_high, obj->desc_filled,
  935. (uint32_t)obj->desc.sender_id << 16, 0, 0, 0);
  936. }
  937. /* The full descriptor has been received, perform any final checks. */
  938. ret = spmc_shmem_check_obj(obj, ffa_version);
  939. if (ret != 0) {
  940. goto err_bad_desc;
  941. }
  942. ret = spmc_shmem_check_state_obj(obj, ffa_version);
  943. if (ret) {
  944. ERROR("%s: invalid memory region descriptor.\n", __func__);
  945. goto err_bad_desc;
  946. }
  947. /*
  948. * Everything checks out; if the sender was using FF-A v1.0, convert
  949. * the descriptor format to use the v1.1 structures.
  950. */
  951. if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
  952. struct spmc_shmem_obj *v1_1_obj;
  953. uint64_t mem_handle;
  954. /* Calculate the size that the v1.1 descriptor will require. */
  955. uint64_t v1_1_desc_size =
  956. spmc_shm_get_v1_1_descriptor_size((void *) &obj->desc,
  957. obj->desc_size);
  958. if (v1_1_desc_size > UINT32_MAX) {
  959. ret = FFA_ERROR_NO_MEMORY;
  960. goto err_arg;
  961. }
  962. /* Get a new obj to store the v1.1 descriptor. */
  963. v1_1_obj =
  964. spmc_shmem_obj_alloc(&spmc_shmem_obj_state, (size_t)v1_1_desc_size);
  965. if (!v1_1_obj) {
  966. ret = FFA_ERROR_NO_MEMORY;
  967. goto err_arg;
  968. }
  969. /* Perform the conversion from v1.0 to v1.1. */
  970. v1_1_obj->desc_size = (uint32_t)v1_1_desc_size;
  971. v1_1_obj->desc_filled = (uint32_t)v1_1_desc_size;
  972. if (!spmc_shm_convert_shmem_obj_from_v1_0(v1_1_obj, obj)) {
  973. ERROR("%s: Could not convert mtd!\n", __func__);
  974. spmc_shmem_obj_free(&spmc_shmem_obj_state, v1_1_obj);
  975. goto err_arg;
  976. }
  977. /*
  978. * We're finished with the v1.0 descriptor so free it
  979. * and continue our checks with the new v1.1 descriptor.
  980. */
  981. mem_handle = obj->desc.handle;
  982. spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
  983. obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
  984. if (obj == NULL) {
  985. ERROR("%s: Failed to find converted descriptor.\n",
  986. __func__);
  987. ret = FFA_ERROR_INVALID_PARAMETER;
  988. return spmc_ffa_error_return(smc_handle, ret);
  989. }
  990. }
  991. /* Allow for platform specific operations to be performed. */
  992. ret = plat_spmc_shmem_begin(&obj->desc);
  993. if (ret != 0) {
  994. goto err_arg;
  995. }
  996. SMC_RET8(smc_handle, FFA_SUCCESS_SMC32, 0, handle_low, handle_high, 0,
  997. 0, 0, 0);
  998. err_bad_desc:
  999. err_arg:
  1000. spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
  1001. return spmc_ffa_error_return(smc_handle, ret);
  1002. }
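/*
 * Protocol sketch (illustrative): from the normal-world sender's point of
 * view, a descriptor larger than one TX buffer is transmitted as
 *
 *	FFA_MEM_SHARE(total_length, fragment_length)	-> FFA_MEM_FRAG_RX
 *	FFA_MEM_FRAG_TX(handle, fragment_length)	-> FFA_MEM_FRAG_RX
 *	...
 *	FFA_MEM_FRAG_TX(handle, final fragment)		-> FFA_SUCCESS
 *
 * where each fragment is copied from the TX buffer into obj->desc by
 * spmc_ffa_fill_desc() above, and the final validation only runs once
 * obj->desc_filled == obj->desc_size.
 */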
  1003. /**
  1004. * spmc_ffa_mem_send - FFA_MEM_SHARE/LEND implementation.
  1005. * @client: Client state.
  1006. * @total_length: Total length of shared memory descriptor.
  1007. * @fragment_length: Length of fragment of shared memory descriptor passed in
  1008. * this call.
  1009. * @address: Not supported, must be 0.
  1010. * @page_count: Not supported, must be 0.
  1011. * @smc_handle: Handle passed to smc call. Used to return
  1012. * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
  1013. *
  1014. * Implements a subset of the FF-A FFA_MEM_SHARE and FFA_MEM_LEND calls needed
  1015. * to share or lend memory from non-secure os to secure os (with no stream
  1016. * endpoints).
  1017. *
  1018. * Return: 0 on success, error code on failure.
  1019. */
  1020. long spmc_ffa_mem_send(uint32_t smc_fid,
  1021. bool secure_origin,
  1022. uint64_t total_length,
  1023. uint32_t fragment_length,
  1024. uint64_t address,
  1025. uint32_t page_count,
  1026. void *cookie,
  1027. void *handle,
  1028. uint64_t flags)
  1029. {
  1030. long ret;
  1031. struct spmc_shmem_obj *obj;
  1032. struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
  1033. ffa_mtd_flag32_t mtd_flag;
  1034. uint32_t ffa_version = get_partition_ffa_version(secure_origin);
  1035. size_t min_desc_size;
  1036. if (address != 0U || page_count != 0U) {
  1037. WARN("%s: custom memory region for message not supported.\n",
  1038. __func__);
  1039. return spmc_ffa_error_return(handle,
  1040. FFA_ERROR_INVALID_PARAMETER);
  1041. }
  1042. if (secure_origin) {
  1043. WARN("%s: unsupported share direction.\n", __func__);
  1044. return spmc_ffa_error_return(handle,
  1045. FFA_ERROR_INVALID_PARAMETER);
  1046. }
  1047. if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
  1048. min_desc_size = sizeof(struct ffa_mtd_v1_0);
  1049. } else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
  1050. min_desc_size = sizeof(struct ffa_mtd);
  1051. } else {
  1052. WARN("%s: bad FF-A version.\n", __func__);
  1053. return spmc_ffa_error_return(handle,
  1054. FFA_ERROR_INVALID_PARAMETER);
  1055. }
  1056. /* Check if the descriptor is too small for the FF-A version. */
  1057. if (fragment_length < min_desc_size) {
  1058. WARN("%s: bad first fragment size %u < %zu\n",
  1059. __func__, fragment_length, min_desc_size);
  1060. return spmc_ffa_error_return(handle,
  1061. FFA_ERROR_INVALID_PARAMETER);
  1062. }
  1063. if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_SHARE) {
  1064. mtd_flag = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
  1065. } else if ((smc_fid & FUNCID_NUM_MASK) == FFA_FNUM_MEM_LEND) {
  1066. mtd_flag = FFA_MTD_FLAG_TYPE_LEND_MEMORY;
  1067. } else {
  1068. WARN("%s: invalid memory management operation.\n", __func__);
  1069. return spmc_ffa_error_return(handle,
  1070. FFA_ERROR_INVALID_PARAMETER);
  1071. }
  1072. spin_lock(&spmc_shmem_obj_state.lock);
  1073. obj = spmc_shmem_obj_alloc(&spmc_shmem_obj_state, total_length);
  1074. if (obj == NULL) {
  1075. ret = FFA_ERROR_NO_MEMORY;
  1076. goto err_unlock;
  1077. }
  1078. spin_lock(&mbox->lock);
  1079. ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, mtd_flag,
  1080. ffa_version, handle);
  1081. spin_unlock(&mbox->lock);
  1082. spin_unlock(&spmc_shmem_obj_state.lock);
  1083. return ret;
  1084. err_unlock:
  1085. spin_unlock(&spmc_shmem_obj_state.lock);
  1086. return spmc_ffa_error_return(handle, ret);
  1087. }
  1088. /**
  1089. * spmc_ffa_mem_frag_tx - FFA_MEM_FRAG_TX implementation.
  1090. * @client: Client state.
  1091. * @handle_low: Handle_low value returned from FFA_MEM_FRAG_RX.
  1092. * @handle_high: Handle_high value returned from FFA_MEM_FRAG_RX.
  1093. * @fragment_length: Length of fragments transmitted.
  1094. * @sender_id: Vmid of sender in bits [31:16]
  1095. * @smc_handle: Handle passed to smc call. Used to return
  1096. * FFA_MEM_FRAG_RX or SMC_FC_FFA_SUCCESS.
  1097. *
  1098. * Return: @smc_handle on success, error code on failure.
  1099. */
  1100. long spmc_ffa_mem_frag_tx(uint32_t smc_fid,
  1101. bool secure_origin,
  1102. uint64_t handle_low,
  1103. uint64_t handle_high,
  1104. uint32_t fragment_length,
  1105. uint32_t sender_id,
  1106. void *cookie,
  1107. void *handle,
  1108. uint64_t flags)
  1109. {
  1110. long ret;
  1111. uint32_t desc_sender_id;
  1112. uint32_t ffa_version = get_partition_ffa_version(secure_origin);
  1113. struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
  1114. struct spmc_shmem_obj *obj;
  1115. uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
  1116. spin_lock(&spmc_shmem_obj_state.lock);
  1117. obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
  1118. if (obj == NULL) {
  1119. WARN("%s: invalid handle, 0x%lx, not a valid handle.\n",
  1120. __func__, mem_handle);
  1121. ret = FFA_ERROR_INVALID_PARAMETER;
  1122. goto err_unlock;
  1123. }
  1124. desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
  1125. if (sender_id != desc_sender_id) {
  1126. WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
  1127. sender_id, desc_sender_id);
  1128. ret = FFA_ERROR_INVALID_PARAMETER;
  1129. goto err_unlock;
  1130. }
  1131. if (obj->desc_filled == obj->desc_size) {
  1132. WARN("%s: object desc already filled, %zu\n", __func__,
  1133. obj->desc_filled);
  1134. ret = FFA_ERROR_INVALID_PARAMETER;
  1135. goto err_unlock;
  1136. }
  1137. spin_lock(&mbox->lock);
  1138. ret = spmc_ffa_fill_desc(mbox, obj, fragment_length, 0, ffa_version,
  1139. handle);
  1140. spin_unlock(&mbox->lock);
  1141. spin_unlock(&spmc_shmem_obj_state.lock);
  1142. return ret;
  1143. err_unlock:
  1144. spin_unlock(&spmc_shmem_obj_state.lock);
  1145. return spmc_ffa_error_return(handle, ret);
  1146. }
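/*
 * Example (illustrative sketch): the 64-bit memory handle is split across two
 * 32-bit SMC arguments, so the reconstruction above mirrors the split
 * performed when FFA_MEM_FRAG_RX was returned by spmc_ffa_fill_desc():
 *
 *	handle_low  = (uint32_t)obj->desc.handle;
 *	handle_high = obj->desc.handle >> 32;
 *	...
 *	mem_handle = handle_low | (((uint64_t)handle_high) << 32);
 */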
  1147. /**
  1148. * spmc_ffa_mem_retrieve_set_ns_bit - Set the NS bit in the response descriptor
  1149. * if the caller implements a version greater
  1150. * than FF-A 1.0 or if they have requested
  1151. * the functionality.
  1152. * TODO: We are assuming that the caller is
  1153. * an SP. To support retrieval from the
  1154. * normal world this function will need to be
  1155. * expanded accordingly.
  1156. * @resp: Descriptor populated in callers RX buffer.
  1157. * @sp_ctx: Context of the calling SP.
  1158. */
  1159. void spmc_ffa_mem_retrieve_set_ns_bit(struct ffa_mtd *resp,
  1160. struct secure_partition_desc *sp_ctx)
  1161. {
  1162. if (sp_ctx->ffa_version > MAKE_FFA_VERSION(1, 0) ||
  1163. sp_ctx->ns_bit_requested) {
  1164. /*
  1165. * Currently memory senders must reside in the normal
  1166. * world, and we do not have the functionality to change
  1167. * the state of memory dynamically. Therefore we can always set
  1168. * the NS bit to 1.
  1169. */
  1170. resp->memory_region_attributes |= FFA_MEM_ATTR_NS_BIT;
  1171. }
  1172. }
  1173. /**
  1174. * spmc_ffa_mem_retrieve_req - FFA_MEM_RETRIEVE_REQ implementation.
  1175. * @smc_fid: FID of SMC
  1176. * @total_length: Total length of retrieve request descriptor if this is
  1177. * the first call. Otherwise (unsupported) must be 0.
  1178. * @fragment_length: Length of fragment of retrieve request descriptor passed
  1179. * in this call. Only @fragment_length == @length is
  1180. * supported by this implementation.
  1181. * @address: Not supported, must be 0.
  1182. * @page_count: Not supported, must be 0.
  1183. * @smc_handle: Handle passed to smc call. Used to return
  1184. * FFA_MEM_RETRIEVE_RESP.
  1185. *
  1186. * Implements a subset of the FF-A FFA_MEM_RETRIEVE_REQ call.
  1187. * Used by secure os to retrieve memory already shared by non-secure os.
  1188. * If the data does not fit in a single FFA_MEM_RETRIEVE_RESP message,
  1189. * the client must call FFA_MEM_FRAG_RX until the full response has been
  1190. * received.
  1191. *
  1192. * Return: @handle on success, error code on failure.
  1193. */
  1194. long
  1195. spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
  1196. bool secure_origin,
  1197. uint32_t total_length,
  1198. uint32_t fragment_length,
  1199. uint64_t address,
  1200. uint32_t page_count,
  1201. void *cookie,
  1202. void *handle,
  1203. uint64_t flags)
  1204. {
  1205. int ret;
  1206. size_t buf_size;
  1207. size_t copy_size = 0;
  1208. size_t min_desc_size;
  1209. size_t out_desc_size = 0;
  1210. /*
  1211. * Currently we are only accessing fields that are the same in both the
  1212. * v1.0 and v1.1 mtd struct therefore we can use a v1.1 struct directly
  1213. * here. We only need to validate against the appropriate struct size.
  1214. */
  1215. struct ffa_mtd *resp;
  1216. const struct ffa_mtd *req;
  1217. struct spmc_shmem_obj *obj = NULL;
  1218. struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
  1219. uint32_t ffa_version = get_partition_ffa_version(secure_origin);
  1220. struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();
  1221. if (!secure_origin) {
  1222. WARN("%s: unsupported retrieve req direction.\n", __func__);
  1223. return spmc_ffa_error_return(handle,
  1224. FFA_ERROR_INVALID_PARAMETER);
  1225. }
  1226. if (address != 0U || page_count != 0U) {
  1227. WARN("%s: custom memory region not supported.\n", __func__);
  1228. return spmc_ffa_error_return(handle,
  1229. FFA_ERROR_INVALID_PARAMETER);
  1230. }
  1231. spin_lock(&mbox->lock);
  1232. req = mbox->tx_buffer;
  1233. resp = mbox->rx_buffer;
  1234. buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
  1235. if (mbox->rxtx_page_count == 0U) {
  1236. WARN("%s: buffer pair not registered.\n", __func__);
  1237. ret = FFA_ERROR_INVALID_PARAMETER;
  1238. goto err_unlock_mailbox;
  1239. }
  1240. if (mbox->state != MAILBOX_STATE_EMPTY) {
  1241. WARN("%s: RX Buffer is full! %d\n", __func__, mbox->state);
  1242. ret = FFA_ERROR_DENIED;
  1243. goto err_unlock_mailbox;
  1244. }
  1245. if (fragment_length != total_length) {
  1246. WARN("%s: fragmented retrieve request not supported.\n",
  1247. __func__);
  1248. ret = FFA_ERROR_INVALID_PARAMETER;
  1249. goto err_unlock_mailbox;
  1250. }
  1251. if (req->emad_count == 0U) {
  1252. WARN("%s: unsupported attribute desc count %u.\n",
  1253. __func__, obj->desc.emad_count);
  1254. ret = FFA_ERROR_INVALID_PARAMETER;
  1255. goto err_unlock_mailbox;
  1256. }
        /* Determine the appropriate minimum descriptor size. */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                min_desc_size = sizeof(struct ffa_mtd_v1_0);
        } else {
                min_desc_size = sizeof(struct ffa_mtd);
        }
        if (total_length < min_desc_size) {
                WARN("%s: invalid length %u < %zu\n", __func__, total_length,
                     min_desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (obj->desc_filled != obj->desc_size) {
                WARN("%s: incomplete object desc filled %zu < size %zu\n",
                     __func__, obj->desc_filled, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->sender_id != obj->desc.sender_id) {
                WARN("%s: wrong sender id 0x%x != 0x%x\n",
                     __func__, req->sender_id, obj->desc.sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->tag != obj->desc.tag) {
                WARN("%s: wrong tag 0x%lx != 0x%lx\n",
                     __func__, req->tag, obj->desc.tag);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->emad_count != 0U && req->emad_count != obj->desc.emad_count) {
                WARN("%s: mismatch of endpoint counts %u != %u\n",
                     __func__, req->emad_count, obj->desc.emad_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }
        /* Ensure the NS bit is set to 0 in the request. */
        if ((req->memory_region_attributes & FFA_MEM_ATTR_NS_BIT) != 0U) {
                WARN("%s: NS mem attributes flags MBZ.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (req->flags != 0U) {
                if ((req->flags & FFA_MTD_FLAG_TYPE_MASK) !=
                    (obj->desc.flags & FFA_MTD_FLAG_TYPE_MASK)) {
                        /*
                         * If the retrieve request specifies the memory
                         * transaction type, ensure it matches what we expect.
                         */
                        WARN("%s: wrong mem transaction flags %x != %x\n",
                             __func__, req->flags, obj->desc.flags);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }

                if (req->flags != FFA_MTD_FLAG_TYPE_SHARE_MEMORY &&
                    req->flags != FFA_MTD_FLAG_TYPE_LEND_MEMORY) {
                        /*
                         * The current implementation does not support donate
                         * and supports no other flags.
                         */
                        WARN("%s: invalid flags 0x%x\n", __func__, req->flags);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }
        /* Validate the caller is a valid participant. */
        if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
                WARN("%s: Invalid endpoint ID (0x%x).\n",
                     __func__, sp_ctx->sp_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate that each provided emad offset and structure is in bounds. */
        for (size_t i = 0; i < req->emad_count; i++) {
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;

                emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
                                               &emad_size);
                if ((uintptr_t) emad >= (uintptr_t)
                                ((uint8_t *) req + total_length)) {
                        WARN("Invalid emad access.\n");
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }
        /*
         * Validate that all the endpoints match in the case of multiple
         * borrowers. We don't mandate that the order of the borrowers
         * must match in the descriptors, therefore check to see if the
         * endpoints match in any order.
         */
        for (size_t i = 0; i < req->emad_count; i++) {
                bool found = false;
                size_t emad_size;
                struct ffa_emad_v1_0 *emad;
                struct ffa_emad_v1_0 *other_emad;

                emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
                                               &emad_size);

                for (size_t j = 0; j < obj->desc.emad_count; j++) {
                        other_emad = spmc_shmem_obj_get_emad(
                                        &obj->desc, j, MAKE_FFA_VERSION(1, 1),
                                        &emad_size);

                        if (req->emad_count &&
                            emad->mapd.endpoint_id ==
                            other_emad->mapd.endpoint_id) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        WARN("%s: invalid receiver id (0x%x).\n",
                             __func__, emad->mapd.endpoint_id);
                        ret = FFA_ERROR_INVALID_PARAMETER;
                        goto err_unlock_all;
                }
        }
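
        /*
         * All checks have passed: mark the RX buffer as full so it is not
         * reused before the receiver releases it, and record the borrower
         * reference that is dropped again by FFA_MEM_RELINQUISH.
         */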
        mbox->state = MAILBOX_STATE_FULL;

        if (req->emad_count != 0U) {
                obj->in_use++;
        }

        /*
         * If the caller is v1.0, convert the descriptor; otherwise copy it
         * directly.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                ret = spmc_populate_ffa_v1_0_descriptor(resp, obj, buf_size, 0,
                                                        &copy_size,
                                                        &out_desc_size);
                if (ret != 0) {
                        ERROR("%s: Failed to process descriptor.\n", __func__);
                        goto err_unlock_all;
                }
        } else {
                copy_size = MIN(obj->desc_size, buf_size);
                out_desc_size = obj->desc_size;
                memcpy(resp, &obj->desc, copy_size);
        }

        /* Set the NS bit in the response if applicable. */
        spmc_ffa_mem_retrieve_set_ns_bit(resp, sp_ctx);

        spin_unlock(&spmc_shmem_obj_state.lock);
        spin_unlock(&mbox->lock);

        SMC_RET8(handle, FFA_MEM_RETRIEVE_RESP, out_desc_size,
                 copy_size, 0, 0, 0, 0, 0);

err_unlock_all:
        spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
        spin_unlock(&mbox->lock);
        return spmc_ffa_error_return(handle, ret);
}
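
/*
 * Illustrative borrower-side request (a sketch, not part of the SPMC): the
 * SP builds a retrieve request in the TX buffer it registered via
 * FFA_RXTX_MAP and issues FFA_MEM_RETRIEVE_REQ with
 * total_length == fragment_length, since fragmented requests are rejected
 * above. `sp_tx_buffer`, `shared_handle` and `sp_ffa_mem_retrieve_req()` are
 * hypothetical placeholders for the SP's runtime; population of the endpoint
 * memory access descriptor is omitted for brevity.
 */
#if 0
        struct ffa_mtd *req = (struct ffa_mtd *)sp_tx_buffer;
        uint32_t req_len = sizeof(*req) + sizeof(struct ffa_emad_v1_0);

        memset(req, 0, req_len);
        req->handle = shared_handle;    /* handle returned to the lender */
        req->emad_count = 1U;           /* this SP is the only borrower */

        /* x1 = total length, x2 = fragment length, x3/x4 must be zero. */
        sp_ffa_mem_retrieve_req(req_len, req_len);
#endif
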
/**
 * spmc_ffa_mem_frag_rx - FFA_MEM_FRAG_RX implementation.
 * @client:           Client state.
 * @handle_low:       Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[31:0].
 * @handle_high:      Handle passed to &FFA_MEM_RETRIEVE_REQ. Bit[63:32].
 * @fragment_offset:  Byte offset in descriptor to resume at.
 * @sender_id:        Bit[31:16]: Endpoint id of sender if client is a
 *                    hypervisor. 0 otherwise.
 * @smc_handle:       Handle passed to smc call. Used to return
 *                    FFA_MEM_FRAG_TX.
 *
 * Return: @smc_handle on success, error code on failure.
 */
long spmc_ffa_mem_frag_rx(uint32_t smc_fid,
                          bool secure_origin,
                          uint32_t handle_low,
                          uint32_t handle_high,
                          uint32_t fragment_offset,
                          uint32_t sender_id,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
        int ret;
        void *src;
        size_t buf_size;
        size_t copy_size;
        size_t full_copy_size;
        uint32_t desc_sender_id;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);
        struct spmc_shmem_obj *obj;
        uint32_t ffa_version = get_partition_ffa_version(secure_origin);

        if (!secure_origin) {
                WARN("%s: can only be called from the secure world.\n",
                     __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
        if (obj == NULL) {
                WARN("%s: invalid handle 0x%lx.\n", __func__, mem_handle);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_shmem;
        }
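
        /*
         * The optional sender ID argument carries the endpoint ID in bits
         * [31:16]; shift the stored sender ID so the two can be compared
         * directly.
         */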
        desc_sender_id = (uint32_t)obj->desc.sender_id << 16;
        if (sender_id != 0U && sender_id != desc_sender_id) {
                WARN("%s: invalid sender_id 0x%x != 0x%x\n", __func__,
                     sender_id, desc_sender_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_shmem;
        }

        if (fragment_offset >= obj->desc_size) {
                WARN("%s: invalid fragment_offset 0x%x >= 0x%zx\n",
                     __func__, fragment_offset, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_shmem;
        }
        spin_lock(&mbox->lock);

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        if (mbox->state != MAILBOX_STATE_EMPTY) {
                WARN("%s: RX Buffer is full!\n", __func__);
                ret = FFA_ERROR_DENIED;
                goto err_unlock_all;
        }

        buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

        mbox->state = MAILBOX_STATE_FULL;

        /*
         * If the caller is v1.0, convert the descriptor; otherwise copy it
         * directly.
         */
        if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
                size_t out_desc_size;

                ret = spmc_populate_ffa_v1_0_descriptor(mbox->rx_buffer, obj,
                                                        buf_size,
                                                        fragment_offset,
                                                        &copy_size,
                                                        &out_desc_size);
                if (ret != 0) {
                        ERROR("%s: Failed to process descriptor.\n", __func__);
                        goto err_unlock_all;
                }
        } else {
                full_copy_size = obj->desc_size - fragment_offset;
                copy_size = MIN(full_copy_size, buf_size);

                src = &obj->desc;

                memcpy(mbox->rx_buffer, src + fragment_offset, copy_size);
        }

        spin_unlock(&mbox->lock);
        spin_unlock(&spmc_shmem_obj_state.lock);

        SMC_RET8(handle, FFA_MEM_FRAG_TX, handle_low, handle_high,
                 copy_size, sender_id, 0, 0, 0);

err_unlock_all:
        spin_unlock(&mbox->lock);
err_unlock_shmem:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}
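
/*
 * Illustrative borrower-side continuation loop (a sketch, not part of the
 * SPMC): when FFA_MEM_RETRIEVE_RESP carried only part of the descriptor, the
 * SP keeps requesting fragments, resuming at the number of bytes already
 * received, until the whole descriptor has been consumed.
 * `sp_ffa_mem_frag_rx()` (returning the fragment length reported by
 * FFA_MEM_FRAG_TX), `sp_ffa_rx_release()`, `received` and `total_len` are
 * hypothetical placeholders for the SP's runtime.
 */
#if 0
        while (received < total_len) {
                size_t frag_len;

                frag_len = sp_ffa_mem_frag_rx(mem_handle, received);
                /* ...consume the fragment from the RX buffer here... */
                received += frag_len;
                sp_ffa_rx_release();    /* free the RX buffer for the next call */
        }
#endif
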
/**
 * spmc_ffa_mem_relinquish - FFA_MEM_RELINQUISH implementation.
 * @client:  Client state.
 *
 * Implements a subset of the FF-A FFA_MEM_RELINQUISH call.
 * Used by secure os to release previously shared memory to non-secure os.
 *
 * The handle to release must be in the client's (secure os's) transmit buffer.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_relinquish(uint32_t smc_fid,
                            bool secure_origin,
                            uint32_t handle_low,
                            uint32_t handle_high,
                            uint32_t fragment_offset,
                            uint32_t sender_id,
                            void *cookie,
                            void *handle,
                            uint64_t flags)
{
        int ret;
        struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
        struct spmc_shmem_obj *obj;
        const struct ffa_mem_relinquish_descriptor *req;
        struct secure_partition_desc *sp_ctx = spmc_get_current_sp_ctx();

        if (!secure_origin) {
                WARN("%s: unsupported relinquish direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&mbox->lock);

        if (mbox->rxtx_page_count == 0U) {
                WARN("%s: buffer pair not registered.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        req = mbox->tx_buffer;

        if (req->flags != 0U) {
                WARN("%s: unsupported flags 0x%x\n", __func__, req->flags);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        if (req->endpoint_count == 0) {
                WARN("%s: endpoint count cannot be 0.\n", __func__);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_mailbox;
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, req->handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /*
         * Validate the endpoint ID was populated correctly. We don't currently
         * support proxy endpoints so the endpoint count should always be 1.
         */
        if (req->endpoint_count != 1U) {
                WARN("%s: unsupported endpoint count %u != 1\n", __func__,
                     req->endpoint_count);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate provided endpoint ID matches the partition ID. */
        if (req->endpoint_array[0] != sp_ctx->sp_id) {
                WARN("%s: invalid endpoint ID %u != %u\n", __func__,
                     req->endpoint_array[0], sp_ctx->sp_id);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }

        /* Validate the caller is a valid participant. */
        if (!spmc_shmem_obj_validate_id(obj, sp_ctx->sp_id)) {
                WARN("%s: Invalid endpoint ID (0x%x).\n",
                     __func__, req->endpoint_array[0]);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }
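
        /*
         * Each successful retrieve incremented obj->in_use; a relinquish with
         * no outstanding retrieval is therefore invalid, otherwise drop one
         * reference.
         */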
        if (obj->in_use == 0U) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock_all;
        }
        obj->in_use--;

        spin_unlock(&spmc_shmem_obj_state.lock);
        spin_unlock(&mbox->lock);

        SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock_all:
        spin_unlock(&spmc_shmem_obj_state.lock);
err_unlock_mailbox:
        spin_unlock(&mbox->lock);
        return spmc_ffa_error_return(handle, ret);
}
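
/*
 * Illustrative borrower-side relinquish (a sketch, not part of the SPMC):
 * the SP places an ffa_mem_relinquish_descriptor naming its own endpoint ID
 * in its TX buffer and invokes FFA_MEM_RELINQUISH. `sp_tx_buffer`,
 * `shared_handle`, `own_sp_id` and `sp_ffa_mem_relinquish()` are hypothetical
 * placeholders for the SP's runtime.
 */
#if 0
        struct ffa_mem_relinquish_descriptor *rel =
                (struct ffa_mem_relinquish_descriptor *)sp_tx_buffer;

        rel->handle = shared_handle;
        rel->flags = 0U;                /* no flags are supported */
        rel->endpoint_count = 1U;       /* proxy endpoints are not supported */
        rel->endpoint_array[0] = own_sp_id;

        sp_ffa_mem_relinquish();
#endif
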
/**
 * spmc_ffa_mem_reclaim - FFA_MEM_RECLAIM implementation.
 * @client:       Client state.
 * @handle_low:   Unique handle of shared memory object to reclaim. Bit[31:0].
 * @handle_high:  Unique handle of shared memory object to reclaim.
 *                Bit[63:32].
 * @flags:        Unsupported, ignored.
 *
 * Implements a subset of the FF-A FFA_MEM_RECLAIM call.
 * Used by non-secure os to reclaim memory previously shared with secure os.
 *
 * Return: 0 on success, error code on failure.
 */
int spmc_ffa_mem_reclaim(uint32_t smc_fid,
                         bool secure_origin,
                         uint32_t handle_low,
                         uint32_t handle_high,
                         uint32_t mem_flags,
                         uint64_t x4,
                         void *cookie,
                         void *handle,
                         uint64_t flags)
{
        int ret;
        struct spmc_shmem_obj *obj;
        uint64_t mem_handle = handle_low | (((uint64_t)handle_high) << 32);

        if (secure_origin) {
                WARN("%s: unsupported reclaim direction.\n", __func__);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        if (mem_flags != 0U) {
                WARN("%s: unsupported flags 0x%x\n", __func__, mem_flags);
                return spmc_ffa_error_return(handle,
                                             FFA_ERROR_INVALID_PARAMETER);
        }

        spin_lock(&spmc_shmem_obj_state.lock);

        obj = spmc_shmem_obj_lookup(&spmc_shmem_obj_state, mem_handle);
        if (obj == NULL) {
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }
        if (obj->in_use != 0U) {
                ret = FFA_ERROR_DENIED;
                goto err_unlock;
        }

        if (obj->desc_filled != obj->desc_size) {
                WARN("%s: incomplete object desc filled %zu < size %zu\n",
                     __func__, obj->desc_filled, obj->desc_size);
                ret = FFA_ERROR_INVALID_PARAMETER;
                goto err_unlock;
        }

        /* Allow for platform specific operations to be performed. */
        ret = plat_spmc_shmem_reclaim(&obj->desc);
        if (ret != 0) {
                goto err_unlock;
        }

        spmc_shmem_obj_free(&spmc_shmem_obj_state, obj);
        spin_unlock(&spmc_shmem_obj_state.lock);

        SMC_RET1(handle, FFA_SUCCESS_SMC32);

err_unlock:
        spin_unlock(&spmc_shmem_obj_state.lock);
        return spmc_ffa_error_return(handle, ret);
}
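
/*
 * Illustrative lender-side reclaim (a sketch, not part of the SPMC): once
 * every borrower has relinquished the region and obj->in_use has dropped to
 * zero, the normal world owner reclaims it by handle. `ns_ffa_mem_reclaim()`
 * and `shared_handle` are hypothetical placeholders for the normal world
 * driver.
 */
#if 0
        /* x1/x2 = handle low/high words, x3 = flags (must be zero here). */
        ns_ffa_mem_reclaim((uint32_t)shared_handle,
                           (uint32_t)(shared_handle >> 32), 0U);
#endif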