047-spi_message_queue.patch 19 KB

commit ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0
Author: Linus Walleij <linus.walleij@linaro.org>
Date:   Wed Feb 22 10:05:38 2012 +0100

    spi: create a message queueing infrastructure

    This rips the message queue in the PL022 driver out and pushes
    it into (optional) common infrastructure. Drivers that want to
    use the message pumping thread will need to define the new
    per-message transfer methods and leave the deprecated transfer()
    method as NULL.

    Most of the design is described in the documentation changes that
    are included in this patch.

    Since there is a queue that needs to be stopped when the system
    is suspending/resuming, two new calls are implemented for the
    device drivers to call in their suspend()/resume() functions:
    spi_master_suspend() and spi_master_resume().

    ChangeLog v1->v2:
    - Remove Kconfig entry and do not make the queue support optional
      at all, instead be more aggressive and have it as part of the
      compulsory infrastructure.
    - If the .transfer() method is implemented, print a small
      deprecation notice and do not start the transfer pump.
    - Fix a bitrotted comment.
    ChangeLog v2->v3:
    - Fix up a problematic sequence courtesy of Chris Blair.
    - Stop rather than destroy the queue on suspend() courtesy of
      Chris Blair.

    Signed-off-by: Chris Blair <chris.blair@stericsson.com>
    Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
    Tested-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
    Reviewed-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
    Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
    [Florian: dropped the changes on drivers/spi/spi-pl022.c, removed
    the dev_info() about unqueued drivers still using the master function]
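
The spi_master_suspend()/spi_master_resume() pair mentioned above is meant to
be called from the controller driver's own power-management callbacks. As a
usage illustration (not part of the patch), here is a minimal sketch of how a
driver might wire them up; the foo_* names are hypothetical.

#include <linux/pm.h>
#include <linux/spi/spi.h>

static int foo_spi_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        int ret;

        /* Park the message queue before powering the controller down */
        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        /* ... gate clocks, save controller context, etc. ... */
        return 0;
}

static int foo_spi_resume(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);

        /* ... restore context, ungate clocks ... */

        /* Restart the queue so pending messages get pumped again */
        return spi_master_resume(master);
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);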
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,7 +1,7 @@
 Overview of Linux kernel SPI support
 ====================================
-21-May-2007
+02-Feb-2012
 What is SPI?
 ------------
@@ -483,9 +483,9 @@ also initialize its own internal state.
     and those methods.)
 After you initialize the spi_master, then use spi_register_master() to
-publish it to the rest of the system. At that time, device nodes for
-the controller and any predeclared spi devices will be made available,
-and the driver model core will take care of binding them to drivers.
+publish it to the rest of the system. At that time, device nodes for the
+controller and any predeclared spi devices will be made available, and
+the driver model core will take care of binding them to drivers.
 If you need to remove your SPI controller driver, spi_unregister_master()
 will reverse the effect of spi_register_master().
@@ -521,21 +521,53 @@ SPI MASTER METHODS
         ** When you code setup(), ASSUME that the controller
         ** is actively processing transfers for another device.
-    master->transfer(struct spi_device *spi, struct spi_message *message)
-        This must not sleep. Its responsibility is arrange that the
-        transfer happens and its complete() callback is issued. The two
-        will normally happen later, after other transfers complete, and
-        if the controller is idle it will need to be kickstarted.
-
     master->cleanup(struct spi_device *spi)
        Your controller driver may use spi_device.controller_state to hold
        state it dynamically associates with that device. If you do that,
        be sure to provide the cleanup() method to free that state.
+    master->prepare_transfer_hardware(struct spi_master *master)
+        This will be called by the queue mechanism to signal to the driver
+        that a message is coming in soon, so the subsystem requests the
+        driver to prepare the transfer hardware by issuing this call.
+        This may sleep.
+
+    master->unprepare_transfer_hardware(struct spi_master *master)
+        This will be called by the queue mechanism to signal to the driver
+        that there are no more messages pending in the queue and it may
+        relax the hardware (e.g. by power management calls). This may sleep.
+
+    master->transfer_one_message(struct spi_master *master,
+                                 struct spi_message *mesg)
+        The subsystem calls the driver to transfer a single message while
+        queuing transfers that arrive in the meantime. When the driver is
+        finished with this message, it must call
+        spi_finalize_current_message() so the subsystem can issue the next
+        transfer. This may sleep.
+
+    DEPRECATED METHODS
+
+    master->transfer(struct spi_device *spi, struct spi_message *message)
+        This must not sleep. Its responsibility is to arrange that the
+        transfer happens and its complete() callback is issued. The two
+        will normally happen later, after other transfers complete, and
+        if the controller is idle it will need to be kickstarted. This
+        method is not used on queued controllers and must be NULL if
+        transfer_one_message() and (un)prepare_transfer_hardware() are
+        implemented.
+
 SPI MESSAGE QUEUE
-The bulk of the driver will be managing the I/O queue fed by transfer().
+If you are happy with the standard queueing mechanism provided by the
+SPI subsystem, just implement the queued methods specified above. Using
+the message queue has the upside of centralizing a lot of code and
+providing pure process-context execution of methods. The message queue
+can also be elevated to realtime priority on high-priority SPI traffic.
+
+Unless the queueing mechanism in the SPI subsystem is selected, the bulk
+of the driver will be managing the I/O queue fed by the now deprecated
+function transfer().
 That queue could be purely conceptual. For example, a driver used only
 for low-frequency sensor access might be fine using synchronous PIO.
@@ -561,4 +593,6 @@ Stephen Street
 Mark Underwood
 Andrew Victor
 Vitaly Wool
-
+Grant Likely
+Mark Brown
+Linus Walleij
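
To make the queued methods described above concrete, here is a minimal sketch
(not part of the patch) of the three hooks a controller driver would implement
instead of transfer(); the foo_* names are hypothetical and the actual data
movement is elided.

#include <linux/spi/spi.h>

static int foo_prepare_transfer_hardware(struct spi_master *master)
{
        /* A message will arrive shortly; this may sleep, so enabling
         * clocks or runtime PM here is fine. */
        return 0;
}

static int foo_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
        struct spi_transfer *xfer;

        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                /* ... shift out xfer->tx_buf, capture into xfer->rx_buf ... */
                msg->actual_length += xfer->len;
        }

        msg->status = 0;
        /* Hand the message back so the core can pump the next one */
        spi_finalize_current_message(master);
        return 0;
}

static int foo_unprepare_transfer_hardware(struct spi_master *master)
{
        /* The queue has drained: relax the hardware, e.g. disable clocks */
        return 0;
}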
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,9 @@
 #include <linux/of_spi.h>
 #include <linux/pm_runtime.h>
 #include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
 static void spidev_release(struct device *dev)
 {
@@ -507,6 +510,293 @@ spi_register_board_info(struct spi_board
 /*-------------------------------------------------------------------------*/
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so call out to the driver to initialize hardware
+ * and transfer each message.
+ *
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+        struct spi_master *master =
+                container_of(work, struct spi_master, pump_messages);
+        unsigned long flags;
+        bool was_busy = false;
+        int ret;
+
+        /* Lock queue and check for queue work */
+        spin_lock_irqsave(&master->queue_lock, flags);
+        if (list_empty(&master->queue) || !master->running) {
+                if (master->busy) {
+                        ret = master->unprepare_transfer_hardware(master);
+                        if (ret) {
+                                dev_err(&master->dev,
+                                        "failed to unprepare transfer hardware\n");
+                                return;
+                        }
+                }
+                master->busy = false;
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return;
+        }
+
+        /* Make sure we are not already running a message */
+        if (master->cur_msg) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return;
+        }
+        /* Extract head of queue */
+        master->cur_msg =
+                list_entry(master->queue.next, struct spi_message, queue);
+
+        list_del_init(&master->cur_msg->queue);
+        if (master->busy)
+                was_busy = true;
+        else
+                master->busy = true;
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        if (!was_busy) {
+                ret = master->prepare_transfer_hardware(master);
+                if (ret) {
+                        dev_err(&master->dev,
+                                "failed to prepare transfer hardware\n");
+                        return;
+                }
+        }
+
+        ret = master->transfer_one_message(master, master->cur_msg);
+        if (ret) {
+                dev_err(&master->dev,
+                        "failed to transfer one message from queue\n");
+                return;
+        }
+}
+
+static int spi_init_queue(struct spi_master *master)
+{
+        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+        INIT_LIST_HEAD(&master->queue);
+        spin_lock_init(&master->queue_lock);
+
+        master->running = false;
+        master->busy = false;
+
+        init_kthread_worker(&master->kworker);
+        master->kworker_task = kthread_run(kthread_worker_fn,
+                                           &master->kworker,
+                                           dev_name(&master->dev));
+        if (IS_ERR(master->kworker_task)) {
+                dev_err(&master->dev, "failed to create message pump task\n");
+                return -ENOMEM;
+        }
+        init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+        /*
+         * Master config will indicate if this controller should run the
+         * message pump with high (realtime) priority to reduce the transfer
+         * latency on the bus by minimising the delay between a transfer
+         * request and the scheduling of the message pump thread. Without this
+         * setting the message pump thread will remain at default priority.
+         */
+        if (master->rt) {
+                dev_info(&master->dev,
+                        "will run message pump with realtime priority\n");
+                sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+        }
+
+        return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+        struct spi_message *next;
+        unsigned long flags;
+
+        /* get a pointer to the next message, if any */
+        spin_lock_irqsave(&master->queue_lock, flags);
+        if (list_empty(&master->queue))
+                next = NULL;
+        else
+                next = list_entry(master->queue.next,
+                                  struct spi_message, queue);
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+        struct spi_message *mesg;
+        unsigned long flags;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+        mesg = master->cur_msg;
+        master->cur_msg = NULL;
+
+        queue_kthread_work(&master->kworker, &master->pump_messages);
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        mesg->state = NULL;
+        if (mesg->complete)
+                mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+
+        if (master->running || master->busy) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return -EBUSY;
+        }
+
+        master->running = true;
+        master->cur_msg = NULL;
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        queue_kthread_work(&master->kworker, &master->pump_messages);
+
+        return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+        unsigned long flags;
+        unsigned limit = 500;
+        int ret = 0;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+
+        /*
+         * This is a bit lame, but is optimized for the common execution path.
+         * A wait_queue on the master->busy could be used, but then the common
+         * execution path (pump_messages) would be required to call wake_up or
+         * friends on every SPI message. Do this instead.
+         */
+        while ((!list_empty(&master->queue) || master->busy) && limit--) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                msleep(10);
+                spin_lock_irqsave(&master->queue_lock, flags);
+        }
+
+        if (!list_empty(&master->queue) || master->busy)
+                ret = -EBUSY;
+        else
+                master->running = false;
+
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+
+        if (ret) {
+                dev_warn(&master->dev,
+                         "could not stop message queue\n");
+                return ret;
+        }
+        return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+        int ret;
+
+        ret = spi_stop_queue(master);
+
+        /*
+         * flush_kthread_worker will block until all work is done.
+         * If the reason that stop_queue timed out is that the work will never
+         * finish, then it does no good to call flush/stop thread, so
+         * return anyway.
+         */
+        if (ret) {
+                dev_err(&master->dev, "problem destroying queue\n");
+                return ret;
+        }
+
+        flush_kthread_worker(&master->kworker);
+        kthread_stop(master->kworker_task);
+
+        return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to be queued to the driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+        struct spi_master *master = spi->master;
+        unsigned long flags;
+
+        spin_lock_irqsave(&master->queue_lock, flags);
+
+        if (!master->running) {
+                spin_unlock_irqrestore(&master->queue_lock, flags);
+                return -ESHUTDOWN;
+        }
+        msg->actual_length = 0;
+        msg->status = -EINPROGRESS;
+
+        list_add_tail(&msg->queue, &master->queue);
+        if (master->running && !master->busy)
+                queue_kthread_work(&master->kworker, &master->pump_messages);
+
+        spin_unlock_irqrestore(&master->queue_lock, flags);
+        return 0;
+}
+
+static int spi_master_initialize_queue(struct spi_master *master)
+{
+        int ret;
+
+        master->queued = true;
+        master->transfer = spi_queued_transfer;
+
+        /* Initialize and start queue */
+        ret = spi_init_queue(master);
+        if (ret) {
+                dev_err(&master->dev, "problem initializing queue\n");
+                goto err_init_queue;
+        }
+        ret = spi_start_queue(master);
+        if (ret) {
+                dev_err(&master->dev, "problem starting queue\n");
+                goto err_start_queue;
+        }
+
+        return 0;
+
+err_start_queue:
+err_init_queue:
+        spi_destroy_queue(master);
+        return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
 static void spi_master_release(struct device *dev)
 {
         struct spi_master *master;
@@ -522,6 +812,7 @@ static struct class spi_master_class = {
 };
+
 /**
  * spi_alloc_master - allocate SPI master controller
  * @dev: the controller, possibly using the platform_bus
@@ -621,6 +912,15 @@ int spi_register_master(struct spi_maste
         dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
                 dynamic ? " (dynamic)" : "");
+        /* If we're using a queued driver, start the queue */
+        if (!master->transfer) {
+                status = spi_master_initialize_queue(master);
+                if (status) {
+                        device_unregister(&master->dev);
+                        goto done;
+                }
+        }
+
         mutex_lock(&board_lock);
         list_add_tail(&master->list, &spi_master_list);
         list_for_each_entry(bi, &board_list, list)
@@ -636,7 +936,6 @@ done:
 }
 EXPORT_SYMBOL_GPL(spi_register_master);
-
 static int __unregister(struct device *dev, void *null)
 {
         spi_unregister_device(to_spi_device(dev));
@@ -657,6 +956,11 @@ void spi_unregister_master(struct spi_ma
 {
         int dummy;
+        if (master->queued) {
+                if (spi_destroy_queue(master))
+                        dev_err(&master->dev, "queue remove failed\n");
+        }
+
         mutex_lock(&board_lock);
         list_del(&master->list);
         mutex_unlock(&board_lock);
@@ -666,6 +970,37 @@ void spi_unregister_master(struct spi_ma
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
+int spi_master_suspend(struct spi_master *master)
+{
+        int ret;
+
+        /* Basically no-ops for non-queued masters */
+        if (!master->queued)
+                return 0;
+
+        ret = spi_stop_queue(master);
+        if (ret)
+                dev_err(&master->dev, "queue stop failed\n");
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_suspend);
+
+int spi_master_resume(struct spi_master *master)
+{
+        int ret;
+
+        if (!master->queued)
+                return 0;
+
+        ret = spi_start_queue(master);
+        if (ret)
+                dev_err(&master->dev, "queue restart failed\n");
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_resume);
+
 static int __spi_master_match(struct device *dev, void *data)
 {
         struct spi_master *m;
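
As a usage note (not part of the patch), spi_get_next_queued_message() lets a
driver peek at the queue from inside its transfer_one_message() path, for
instance to decide whether to keep chip select asserted or the hardware "hot"
between back-to-back messages. A hedged sketch, reusing the hypothetical foo_*
naming:

static void foo_complete_message(struct spi_master *master,
                                 struct spi_message *msg, int status)
{
        msg->status = status;

        /* Peek before finalizing: if nothing else is waiting we can
         * drop chip select and let unprepare_transfer_hardware() run. */
        if (!spi_get_next_queued_message(master)) {
                /* foo_drop_chipselect(master); */
        }

        /* Return the message; the core then schedules the pump again */
        spi_finalize_current_message(master);
}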
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -22,6 +22,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -235,6 +236,27 @@ static inline void spi_unregister_driver
  *      the device whose settings are being modified.
  * @transfer: adds a message to the controller's transfer queue.
  * @cleanup: frees controller-specific state
+ * @queued: whether this master is providing an internal message queue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
+ * @cur_msg: the currently in-flight message
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ *      so the subsystem requests the driver to prepare the transfer hardware
+ *      by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ *      message while queuing transfers that arrive in the meantime. When the
+ *      driver is finished with this message, it must call
+ *      spi_finalize_current_message() so the subsystem can issue the next
+ *      transfer
+ * @unprepare_transfer_hardware: there are currently no more messages on the
+ *      queue so the subsystem notifies the driver that it may relax the
+ *      hardware by issuing this call
  *
  * Each SPI master controller can communicate with one or more @spi_device
  * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -318,6 +340,28 @@ struct spi_master {
         /* called on release() to free memory provided by spi_master */
         void (*cleanup)(struct spi_device *spi);
+
+        /*
+         * These hooks are for drivers that want to use the generic
+         * master transfer queueing mechanism. If these are used, the
+         * transfer() function above must NOT be specified by the driver.
+         * Over time we expect SPI drivers to be phased over to this API.
+         */
+        bool queued;
+        struct kthread_worker kworker;
+        struct task_struct *kworker_task;
+        struct kthread_work pump_messages;
+        spinlock_t queue_lock;
+        struct list_head queue;
+        struct spi_message *cur_msg;
+        bool busy;
+        bool running;
+        bool rt;
+
+        int (*prepare_transfer_hardware)(struct spi_master *master);
+        int (*transfer_one_message)(struct spi_master *master,
+                                    struct spi_message *mesg);
+        int (*unprepare_transfer_hardware)(struct spi_master *master);
 };
 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -343,6 +387,13 @@ static inline void spi_master_put(struct
         put_device(&master->dev);
 }
+/* PM calls that need to be issued by the driver */
+extern int spi_master_suspend(struct spi_master *master);
+extern int spi_master_resume(struct spi_master *master);
+
+/* Calls the driver makes to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
+extern void spi_finalize_current_message(struct spi_master *master);
 /* the spi driver core manages memory for the spi_master classdev */
 extern struct spi_master *
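
Finally, putting the pieces together: a queued-mode driver fills in the three
hooks and leaves ->transfer NULL before registering, and the core then installs
spi_queued_transfer() and starts the message pump for it. A minimal probe
sketch (illustrative only, reusing the hypothetical foo_* hooks from the
earlier sketch):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        int ret;

        master = spi_alloc_master(&pdev->dev, 0);
        if (!master)
                return -ENOMEM;

        /* Opt into the core message queue: leave ->transfer NULL and
         * provide the per-message hooks instead. */
        master->prepare_transfer_hardware = foo_prepare_transfer_hardware;
        master->transfer_one_message = foo_transfer_one_message;
        master->unprepare_transfer_hardware = foo_unprepare_transfer_hardware;
        master->rt = false;     /* set true for a realtime message pump */

        platform_set_drvdata(pdev, master);

        ret = spi_register_master(master);
        if (ret)
                spi_master_put(master);
        return ret;
}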