300-ath9k-Switch-to-using-mac80211-intermediate-software.patch 27 KB

[extraction artifact: two runs of concatenated line-number gutter digits (1…953) from the web viewer were captured here; they are not part of the patch file and carry no content]
  1. From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= <toke@toke.dk>
  2. Date: Fri, 2 Sep 2016 16:00:30 +0200
  3. Subject: [PATCH] ath9k: Switch to using mac80211 intermediate software
  4. queues.
  5. MIME-Version: 1.0
  6. Content-Type: text/plain; charset=UTF-8
  7. Content-Transfer-Encoding: 8bit
  8. This switches ath9k over to using the mac80211 intermediate software
  9. queueing mechanism for data packets. It removes the queueing inside the
  10. driver, except for the retry queue, and instead pulls from mac80211 when
  11. a packet is needed. The retry queue is used to store a packet that was
  12. pulled but can't be sent immediately.
  13. The old code path in ath_tx_start that would queue packets has been
  14. removed completely, as has the qlen limit tunables (since there's no
  15. longer a queue in the driver to limit).
  16. Based on Tim's original patch set, but reworked quite thoroughly.
  17. Cc: Tim Shepard <shep@alum.mit.edu>
  18. Cc: Felix Fietkau <nbd@nbd.name>
  19. Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
  20. ---
  21. --- a/drivers/net/wireless/ath/ath9k/ath9k.h
  22. +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
  23. @@ -91,7 +91,6 @@ int ath_descdma_setup(struct ath_softc *
  24. #define ATH_RXBUF 512
  25. #define ATH_TXBUF 512
  26. #define ATH_TXBUF_RESERVE 5
  27. -#define ATH_MAX_QDEPTH (ATH_TXBUF / 4 - ATH_TXBUF_RESERVE)
  28. #define ATH_TXMAXTRY 13
  29. #define ATH_MAX_SW_RETRIES 30
  30. @@ -145,7 +144,7 @@ int ath_descdma_setup(struct ath_softc *
  31. #define BAW_WITHIN(_start, _bawsz, _seqno) \
  32. ((((_seqno) - (_start)) & 4095) < (_bawsz))
  33. -#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
  34. +#define ATH_AN_2_TID(_an, _tidno) ath_node_to_tid(_an, _tidno)
  35. #define IS_HT_RATE(rate) (rate & 0x80)
  36. #define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
  37. @@ -164,7 +163,6 @@ struct ath_txq {
  38. spinlock_t axq_lock;
  39. u32 axq_depth;
  40. u32 axq_ampdu_depth;
  41. - bool stopped;
  42. bool axq_tx_inprogress;
  43. struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
  44. u8 txq_headidx;
  45. @@ -232,7 +230,6 @@ struct ath_buf {
  46. struct ath_atx_tid {
  47. struct list_head list;
  48. - struct sk_buff_head buf_q;
  49. struct sk_buff_head retry_q;
  50. struct ath_node *an;
  51. struct ath_txq *txq;
  52. @@ -247,13 +244,13 @@ struct ath_atx_tid {
  53. s8 bar_index;
  54. bool active;
  55. bool clear_ps_filter;
  56. + bool has_queued;
  57. };
  58. struct ath_node {
  59. struct ath_softc *sc;
  60. struct ieee80211_sta *sta; /* station struct we're part of */
  61. struct ieee80211_vif *vif; /* interface with which we're associated */
  62. - struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
  63. u16 maxampdu;
  64. u8 mpdudensity;
  65. @@ -276,7 +273,6 @@ struct ath_tx_control {
  66. struct ath_node *an;
  67. struct ieee80211_sta *sta;
  68. u8 paprd;
  69. - bool force_channel;
  70. };
  71. @@ -293,7 +289,6 @@ struct ath_tx {
  72. struct ath_descdma txdma;
  73. struct ath_txq *txq_map[IEEE80211_NUM_ACS];
  74. struct ath_txq *uapsdq;
  75. - u32 txq_max_pending[IEEE80211_NUM_ACS];
  76. u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
  77. };
  78. @@ -421,6 +416,22 @@ struct ath_offchannel {
  79. int duration;
  80. };
  81. +static inline struct ath_atx_tid *
  82. +ath_node_to_tid(struct ath_node *an, u8 tidno)
  83. +{
  84. + struct ieee80211_sta *sta = an->sta;
  85. + struct ieee80211_vif *vif = an->vif;
  86. + struct ieee80211_txq *txq;
  87. +
  88. + BUG_ON(!vif);
  89. + if (sta)
  90. + txq = sta->txq[tidno % ARRAY_SIZE(sta->txq)];
  91. + else
  92. + txq = vif->txq;
  93. +
  94. + return (struct ath_atx_tid *) txq->drv_priv;
  95. +}
  96. +
  97. #define case_rtn_string(val) case val: return #val
  98. #define ath_for_each_chanctx(_sc, _ctx) \
  99. @@ -575,7 +586,6 @@ void ath_tx_edma_tasklet(struct ath_soft
  100. int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
  101. u16 tid, u16 *ssn);
  102. void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
  103. -void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
  104. void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
  105. void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
  106. @@ -585,6 +595,7 @@ void ath9k_release_buffered_frames(struc
  107. u16 tids, int nframes,
  108. enum ieee80211_frame_release_type reason,
  109. bool more_data);
  110. +void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue);
  111. /********/
  112. /* VIFs */
  113. --- a/drivers/net/wireless/ath/ath9k/channel.c
  114. +++ b/drivers/net/wireless/ath/ath9k/channel.c
  115. @@ -1010,7 +1010,6 @@ static void ath_scan_send_probe(struct a
  116. goto error;
  117. txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
  118. - txctl.force_channel = true;
  119. if (ath_tx_start(sc->hw, skb, &txctl))
  120. goto error;
  121. @@ -1133,7 +1132,6 @@ ath_chanctx_send_vif_ps_frame(struct ath
  122. memset(&txctl, 0, sizeof(txctl));
  123. txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
  124. txctl.sta = sta;
  125. - txctl.force_channel = true;
  126. if (ath_tx_start(sc->hw, skb, &txctl)) {
  127. ieee80211_free_txskb(sc->hw, skb);
  128. return false;
  129. --- a/drivers/net/wireless/ath/ath9k/debug.c
  130. +++ b/drivers/net/wireless/ath/ath9k/debug.c
  131. @@ -600,7 +600,6 @@ static int read_file_xmit(struct seq_fil
  132. PR("MPDUs XRetried: ", xretries);
  133. PR("Aggregates: ", a_aggr);
  134. PR("AMPDUs Queued HW:", a_queued_hw);
  135. - PR("AMPDUs Queued SW:", a_queued_sw);
  136. PR("AMPDUs Completed:", a_completed);
  137. PR("AMPDUs Retried: ", a_retries);
  138. PR("AMPDUs XRetried: ", a_xretries);
  139. @@ -629,8 +628,7 @@ static void print_queue(struct ath_softc
  140. seq_printf(file, "%s: %d ", "qnum", txq->axq_qnum);
  141. seq_printf(file, "%s: %2d ", "qdepth", txq->axq_depth);
  142. seq_printf(file, "%s: %2d ", "ampdu-depth", txq->axq_ampdu_depth);
  143. - seq_printf(file, "%s: %3d ", "pending", txq->pending_frames);
  144. - seq_printf(file, "%s: %d\n", "stopped", txq->stopped);
  145. + seq_printf(file, "%s: %3d\n", "pending", txq->pending_frames);
  146. ath_txq_unlock(sc, txq);
  147. }
  148. @@ -1208,7 +1206,6 @@ static const char ath9k_gstrings_stats[]
  149. AMKSTR(d_tx_mpdu_xretries),
  150. AMKSTR(d_tx_aggregates),
  151. AMKSTR(d_tx_ampdus_queued_hw),
  152. - AMKSTR(d_tx_ampdus_queued_sw),
  153. AMKSTR(d_tx_ampdus_completed),
  154. AMKSTR(d_tx_ampdu_retries),
  155. AMKSTR(d_tx_ampdu_xretries),
  156. @@ -1288,7 +1285,6 @@ void ath9k_get_et_stats(struct ieee80211
  157. AWDATA(xretries);
  158. AWDATA(a_aggr);
  159. AWDATA(a_queued_hw);
  160. - AWDATA(a_queued_sw);
  161. AWDATA(a_completed);
  162. AWDATA(a_retries);
  163. AWDATA(a_xretries);
  164. @@ -1346,14 +1342,6 @@ int ath9k_init_debug(struct ath_hw *ah)
  165. read_file_xmit);
  166. debugfs_create_devm_seqfile(sc->dev, "queues", sc->debug.debugfs_phy,
  167. read_file_queues);
  168. - debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
  169. - &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
  170. - debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
  171. - &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
  172. - debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
  173. - &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
  174. - debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
  175. - &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
  176. debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
  177. read_file_misc);
  178. debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
  179. --- a/drivers/net/wireless/ath/ath9k/debug.h
  180. +++ b/drivers/net/wireless/ath/ath9k/debug.h
  181. @@ -147,7 +147,6 @@ struct ath_interrupt_stats {
  182. * @completed: Total MPDUs (non-aggr) completed
  183. * @a_aggr: Total no. of aggregates queued
  184. * @a_queued_hw: Total AMPDUs queued to hardware
  185. - * @a_queued_sw: Total AMPDUs queued to software queues
  186. * @a_completed: Total AMPDUs completed
  187. * @a_retries: No. of AMPDUs retried (SW)
  188. * @a_xretries: No. of AMPDUs dropped due to xretries
  189. @@ -174,7 +173,6 @@ struct ath_tx_stats {
  190. u32 xretries;
  191. u32 a_aggr;
  192. u32 a_queued_hw;
  193. - u32 a_queued_sw;
  194. u32 a_completed;
  195. u32 a_retries;
  196. u32 a_xretries;
  197. --- a/drivers/net/wireless/ath/ath9k/debug_sta.c
  198. +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
  199. @@ -52,8 +52,8 @@ static ssize_t read_file_node_aggr(struc
  200. "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
  201. "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
  202. - for (tidno = 0, tid = &an->tid[tidno];
  203. - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
  204. + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
  205. + tid = ath_node_to_tid(an, tidno);
  206. txq = tid->txq;
  207. ath_txq_lock(sc, txq);
  208. if (tid->active) {
  209. --- a/drivers/net/wireless/ath/ath9k/init.c
  210. +++ b/drivers/net/wireless/ath/ath9k/init.c
  211. @@ -358,7 +358,6 @@ static int ath9k_init_queues(struct ath_
  212. for (i = 0; i < IEEE80211_NUM_ACS; i++) {
  213. sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
  214. sc->tx.txq_map[i]->mac80211_qnum = i;
  215. - sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
  216. }
  217. return 0;
  218. }
  219. @@ -877,6 +876,7 @@ static void ath9k_set_hw_capab(struct at
  220. hw->max_rate_tries = 10;
  221. hw->sta_data_size = sizeof(struct ath_node);
  222. hw->vif_data_size = sizeof(struct ath_vif);
  223. + hw->txq_data_size = sizeof(struct ath_atx_tid);
  224. hw->extra_tx_headroom = 4;
  225. hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
  226. --- a/drivers/net/wireless/ath/ath9k/main.c
  227. +++ b/drivers/net/wireless/ath/ath9k/main.c
  228. @@ -1902,9 +1902,11 @@ static int ath9k_ampdu_action(struct iee
  229. bool flush = false;
  230. int ret = 0;
  231. struct ieee80211_sta *sta = params->sta;
  232. + struct ath_node *an = (struct ath_node *)sta->drv_priv;
  233. enum ieee80211_ampdu_mlme_action action = params->action;
  234. u16 tid = params->tid;
  235. u16 *ssn = &params->ssn;
  236. + struct ath_atx_tid *atid;
  237. mutex_lock(&sc->mutex);
  238. @@ -1937,9 +1939,9 @@ static int ath9k_ampdu_action(struct iee
  239. ath9k_ps_restore(sc);
  240. break;
  241. case IEEE80211_AMPDU_TX_OPERATIONAL:
  242. - ath9k_ps_wakeup(sc);
  243. - ath_tx_aggr_resume(sc, sta, tid);
  244. - ath9k_ps_restore(sc);
  245. + atid = ath_node_to_tid(an, tid);
  246. + atid->baw_size = IEEE80211_MIN_AMPDU_BUF <<
  247. + sta->ht_cap.ampdu_factor;
  248. break;
  249. default:
  250. ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
  251. @@ -2701,4 +2703,5 @@ struct ieee80211_ops ath9k_ops = {
  252. .sw_scan_start = ath9k_sw_scan_start,
  253. .sw_scan_complete = ath9k_sw_scan_complete,
  254. .get_txpower = ath9k_get_txpower,
  255. + .wake_tx_queue = ath9k_wake_tx_queue,
  256. };
  257. --- a/drivers/net/wireless/ath/ath9k/xmit.c
  258. +++ b/drivers/net/wireless/ath/ath9k/xmit.c
  259. @@ -67,6 +67,8 @@ static struct ath_buf *ath_tx_setup_buff
  260. struct ath_txq *txq,
  261. struct ath_atx_tid *tid,
  262. struct sk_buff *skb);
  263. +static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
  264. + struct ath_tx_control *txctl);
  265. enum {
  266. MCS_HT20,
  267. @@ -137,6 +139,26 @@ static void ath_tx_queue_tid(struct ath_
  268. list_add_tail(&tid->list, list);
  269. }
  270. +void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
  271. +{
  272. + struct ath_softc *sc = hw->priv;
  273. + struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  274. + struct ath_atx_tid *tid = (struct ath_atx_tid *) queue->drv_priv;
  275. + struct ath_txq *txq = tid->txq;
  276. +
  277. + ath_dbg(common, QUEUE, "Waking TX queue: %pM (%d)\n",
  278. + queue->sta ? queue->sta->addr : queue->vif->addr,
  279. + tid->tidno);
  280. +
  281. + ath_txq_lock(sc, txq);
  282. +
  283. + tid->has_queued = true;
  284. + ath_tx_queue_tid(sc, txq, tid);
  285. + ath_txq_schedule(sc, txq);
  286. +
  287. + ath_txq_unlock(sc, txq);
  288. +}
  289. +
  290. static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
  291. {
  292. struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
  293. @@ -164,7 +186,6 @@ static void ath_set_rates(struct ieee802
  294. static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
  295. struct sk_buff *skb)
  296. {
  297. - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  298. struct ath_frame_info *fi = get_frame_info(skb);
  299. int q = fi->txq;
  300. @@ -175,14 +196,6 @@ static void ath_txq_skb_done(struct ath_
  301. if (WARN_ON(--txq->pending_frames < 0))
  302. txq->pending_frames = 0;
  303. - if (txq->stopped &&
  304. - txq->pending_frames < sc->tx.txq_max_pending[q]) {
  305. - if (ath9k_is_chanctx_enabled())
  306. - ieee80211_wake_queue(sc->hw, info->hw_queue);
  307. - else
  308. - ieee80211_wake_queue(sc->hw, q);
  309. - txq->stopped = false;
  310. - }
  311. }
  312. static struct ath_atx_tid *
  313. @@ -192,9 +205,48 @@ ath_get_skb_tid(struct ath_softc *sc, st
  314. return ATH_AN_2_TID(an, tidno);
  315. }
  316. +static struct sk_buff *
  317. +ath_tid_pull(struct ath_atx_tid *tid)
  318. +{
  319. + struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv);
  320. + struct ath_softc *sc = tid->an->sc;
  321. + struct ieee80211_hw *hw = sc->hw;
  322. + struct ath_tx_control txctl = {
  323. + .txq = tid->txq,
  324. + .sta = tid->an->sta,
  325. + };
  326. + struct sk_buff *skb;
  327. + struct ath_frame_info *fi;
  328. + int q;
  329. +
  330. + if (!tid->has_queued)
  331. + return NULL;
  332. +
  333. + skb = ieee80211_tx_dequeue(hw, txq);
  334. + if (!skb) {
  335. + tid->has_queued = false;
  336. + return NULL;
  337. + }
  338. +
  339. + if (ath_tx_prepare(hw, skb, &txctl)) {
  340. + ieee80211_free_txskb(hw, skb);
  341. + return NULL;
  342. + }
  343. +
  344. + q = skb_get_queue_mapping(skb);
  345. + if (tid->txq == sc->tx.txq_map[q]) {
  346. + fi = get_frame_info(skb);
  347. + fi->txq = q;
  348. + ++tid->txq->pending_frames;
  349. + }
  350. +
  351. + return skb;
  352. + }
  353. +
  354. +
  355. static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
  356. {
  357. - return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
  358. + return !skb_queue_empty(&tid->retry_q) || tid->has_queued;
  359. }
  360. static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
  361. @@ -203,46 +255,11 @@ static struct sk_buff *ath_tid_dequeue(s
  362. skb = __skb_dequeue(&tid->retry_q);
  363. if (!skb)
  364. - skb = __skb_dequeue(&tid->buf_q);
  365. + skb = ath_tid_pull(tid);
  366. return skb;
  367. }
  368. -/*
  369. - * ath_tx_tid_change_state:
  370. - * - clears a-mpdu flag of previous session
  371. - * - force sequence number allocation to fix next BlockAck Window
  372. - */
  373. -static void
  374. -ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
  375. -{
  376. - struct ath_txq *txq = tid->txq;
  377. - struct ieee80211_tx_info *tx_info;
  378. - struct sk_buff *skb, *tskb;
  379. - struct ath_buf *bf;
  380. - struct ath_frame_info *fi;
  381. -
  382. - skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
  383. - fi = get_frame_info(skb);
  384. - bf = fi->bf;
  385. -
  386. - tx_info = IEEE80211_SKB_CB(skb);
  387. - tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
  388. -
  389. - if (bf)
  390. - continue;
  391. -
  392. - bf = ath_tx_setup_buffer(sc, txq, tid, skb);
  393. - if (!bf) {
  394. - __skb_unlink(skb, &tid->buf_q);
  395. - ath_txq_skb_done(sc, txq, skb);
  396. - ieee80211_free_txskb(sc->hw, skb);
  397. - continue;
  398. - }
  399. - }
  400. -
  401. -}
  402. -
  403. static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
  404. {
  405. struct ath_txq *txq = tid->txq;
  406. @@ -883,20 +900,16 @@ static int ath_compute_num_delims(struct
  407. static struct ath_buf *
  408. ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
  409. - struct ath_atx_tid *tid, struct sk_buff_head **q)
  410. + struct ath_atx_tid *tid)
  411. {
  412. struct ieee80211_tx_info *tx_info;
  413. struct ath_frame_info *fi;
  414. - struct sk_buff *skb;
  415. + struct sk_buff *skb, *first_skb = NULL;
  416. struct ath_buf *bf;
  417. u16 seqno;
  418. while (1) {
  419. - *q = &tid->retry_q;
  420. - if (skb_queue_empty(*q))
  421. - *q = &tid->buf_q;
  422. -
  423. - skb = skb_peek(*q);
  424. + skb = ath_tid_dequeue(tid);
  425. if (!skb)
  426. break;
  427. @@ -908,7 +921,6 @@ ath_tx_get_tid_subframe(struct ath_softc
  428. bf->bf_state.stale = false;
  429. if (!bf) {
  430. - __skb_unlink(skb, *q);
  431. ath_txq_skb_done(sc, txq, skb);
  432. ieee80211_free_txskb(sc->hw, skb);
  433. continue;
  434. @@ -937,8 +949,20 @@ ath_tx_get_tid_subframe(struct ath_softc
  435. seqno = bf->bf_state.seqno;
  436. /* do not step over block-ack window */
  437. - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
  438. + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
  439. + __skb_queue_tail(&tid->retry_q, skb);
  440. +
  441. + /* If there are other skbs in the retry q, they are
  442. + * probably within the BAW, so loop immediately to get
  443. + * one of them. Otherwise the queue can get stuck. */
  444. + if (!skb_queue_is_first(&tid->retry_q, skb) &&
  445. + !WARN_ON(skb == first_skb)) {
  446. + if(!first_skb) /* infinite loop prevention */
  447. + first_skb = skb;
  448. + continue;
  449. + }
  450. break;
  451. + }
  452. if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
  453. struct ath_tx_status ts = {};
  454. @@ -946,7 +970,6 @@ ath_tx_get_tid_subframe(struct ath_softc
  455. INIT_LIST_HEAD(&bf_head);
  456. list_add(&bf->list, &bf_head);
  457. - __skb_unlink(skb, *q);
  458. ath_tx_update_baw(sc, tid, seqno);
  459. ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
  460. continue;
  461. @@ -958,11 +981,10 @@ ath_tx_get_tid_subframe(struct ath_softc
  462. return NULL;
  463. }
  464. -static bool
  465. +static int
  466. ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
  467. struct ath_atx_tid *tid, struct list_head *bf_q,
  468. - struct ath_buf *bf_first, struct sk_buff_head *tid_q,
  469. - int *aggr_len)
  470. + struct ath_buf *bf_first)
  471. {
  472. #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
  473. struct ath_buf *bf = bf_first, *bf_prev = NULL;
  474. @@ -972,12 +994,13 @@ ath_tx_form_aggr(struct ath_softc *sc, s
  475. struct ieee80211_tx_info *tx_info;
  476. struct ath_frame_info *fi;
  477. struct sk_buff *skb;
  478. - bool closed = false;
  479. +
  480. bf = bf_first;
  481. aggr_limit = ath_lookup_rate(sc, bf, tid);
  482. - do {
  483. + while (bf)
  484. + {
  485. skb = bf->bf_mpdu;
  486. fi = get_frame_info(skb);
  487. @@ -986,12 +1009,12 @@ ath_tx_form_aggr(struct ath_softc *sc, s
  488. if (nframes) {
  489. if (aggr_limit < al + bpad + al_delta ||
  490. ath_lookup_legacy(bf) || nframes >= h_baw)
  491. - break;
  492. + goto stop;
  493. tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
  494. if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
  495. !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
  496. - break;
  497. + goto stop;
  498. }
  499. /* add padding for previous frame to aggregation length */
  500. @@ -1013,20 +1036,18 @@ ath_tx_form_aggr(struct ath_softc *sc, s
  501. ath_tx_addto_baw(sc, tid, bf);
  502. bf->bf_state.ndelim = ndelim;
  503. - __skb_unlink(skb, tid_q);
  504. list_add_tail(&bf->list, bf_q);
  505. if (bf_prev)
  506. bf_prev->bf_next = bf;
  507. bf_prev = bf;
  508. - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
  509. - if (!bf) {
  510. - closed = true;
  511. - break;
  512. - }
  513. - } while (ath_tid_has_buffered(tid));
  514. -
  515. + bf = ath_tx_get_tid_subframe(sc, txq, tid);
  516. + }
  517. + goto finish;
  518. +stop:
  519. + __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
  520. +finish:
  521. bf = bf_first;
  522. bf->bf_lastbf = bf_prev;
  523. @@ -1037,9 +1058,7 @@ ath_tx_form_aggr(struct ath_softc *sc, s
  524. TX_STAT_INC(txq->axq_qnum, a_aggr);
  525. }
  526. - *aggr_len = al;
  527. -
  528. - return closed;
  529. + return al;
  530. #undef PADBYTES
  531. }
  532. @@ -1416,18 +1435,15 @@ static void ath_tx_fill_desc(struct ath_
  533. static void
  534. ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
  535. struct ath_atx_tid *tid, struct list_head *bf_q,
  536. - struct ath_buf *bf_first, struct sk_buff_head *tid_q)
  537. + struct ath_buf *bf_first)
  538. {
  539. struct ath_buf *bf = bf_first, *bf_prev = NULL;
  540. - struct sk_buff *skb;
  541. int nframes = 0;
  542. do {
  543. struct ieee80211_tx_info *tx_info;
  544. - skb = bf->bf_mpdu;
  545. nframes++;
  546. - __skb_unlink(skb, tid_q);
  547. list_add_tail(&bf->list, bf_q);
  548. if (bf_prev)
  549. bf_prev->bf_next = bf;
  550. @@ -1436,13 +1452,15 @@ ath_tx_form_burst(struct ath_softc *sc,
  551. if (nframes >= 2)
  552. break;
  553. - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
  554. + bf = ath_tx_get_tid_subframe(sc, txq, tid);
  555. if (!bf)
  556. break;
  557. tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
  558. - if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
  559. + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
  560. + __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
  561. break;
  562. + }
  563. ath_set_rates(tid->an->vif, tid->an->sta, bf);
  564. } while (1);
  565. @@ -1453,34 +1471,33 @@ static bool ath_tx_sched_aggr(struct ath
  566. {
  567. struct ath_buf *bf;
  568. struct ieee80211_tx_info *tx_info;
  569. - struct sk_buff_head *tid_q;
  570. struct list_head bf_q;
  571. int aggr_len = 0;
  572. - bool aggr, last = true;
  573. + bool aggr;
  574. if (!ath_tid_has_buffered(tid))
  575. return false;
  576. INIT_LIST_HEAD(&bf_q);
  577. - bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
  578. + bf = ath_tx_get_tid_subframe(sc, txq, tid);
  579. if (!bf)
  580. return false;
  581. tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
  582. aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
  583. if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
  584. - (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
  585. + (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
  586. + __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
  587. *stop = true;
  588. return false;
  589. }
  590. ath_set_rates(tid->an->vif, tid->an->sta, bf);
  591. if (aggr)
  592. - last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
  593. - tid_q, &aggr_len);
  594. + aggr_len = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf);
  595. else
  596. - ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
  597. + ath_tx_form_burst(sc, txq, tid, &bf_q, bf);
  598. if (list_empty(&bf_q))
  599. return false;
  600. @@ -1523,9 +1540,6 @@ int ath_tx_aggr_start(struct ath_softc *
  601. an->mpdudensity = density;
  602. }
  603. - /* force sequence number allocation for pending frames */
  604. - ath_tx_tid_change_state(sc, txtid);
  605. -
  606. txtid->active = true;
  607. *ssn = txtid->seq_start = txtid->seq_next;
  608. txtid->bar_index = -1;
  609. @@ -1550,7 +1564,6 @@ void ath_tx_aggr_stop(struct ath_softc *
  610. ath_txq_lock(sc, txq);
  611. txtid->active = false;
  612. ath_tx_flush_tid(sc, txtid);
  613. - ath_tx_tid_change_state(sc, txtid);
  614. ath_txq_unlock_complete(sc, txq);
  615. }
  616. @@ -1560,14 +1573,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
  617. struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  618. struct ath_atx_tid *tid;
  619. struct ath_txq *txq;
  620. - bool buffered;
  621. int tidno;
  622. ath_dbg(common, XMIT, "%s called\n", __func__);
  623. - for (tidno = 0, tid = &an->tid[tidno];
  624. - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
  625. -
  626. + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
  627. + tid = ath_node_to_tid(an, tidno);
  628. txq = tid->txq;
  629. ath_txq_lock(sc, txq);
  630. @@ -1577,13 +1588,12 @@ void ath_tx_aggr_sleep(struct ieee80211_
  631. continue;
  632. }
  633. - buffered = ath_tid_has_buffered(tid);
  634. + if (!skb_queue_empty(&tid->retry_q))
  635. + ieee80211_sta_set_buffered(sta, tid->tidno, true);
  636. list_del_init(&tid->list);
  637. ath_txq_unlock(sc, txq);
  638. -
  639. - ieee80211_sta_set_buffered(sta, tidno, buffered);
  640. }
  641. }
  642. @@ -1596,49 +1606,20 @@ void ath_tx_aggr_wakeup(struct ath_softc
  643. ath_dbg(common, XMIT, "%s called\n", __func__);
  644. - for (tidno = 0, tid = &an->tid[tidno];
  645. - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
  646. -
  647. + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
  648. + tid = ath_node_to_tid(an, tidno);
  649. txq = tid->txq;
  650. ath_txq_lock(sc, txq);
  651. tid->clear_ps_filter = true;
  652. -
  653. if (ath_tid_has_buffered(tid)) {
  654. ath_tx_queue_tid(sc, txq, tid);
  655. ath_txq_schedule(sc, txq);
  656. }
  657. -
  658. ath_txq_unlock_complete(sc, txq);
  659. }
  660. }
  661. -void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
  662. - u16 tidno)
  663. -{
  664. - struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  665. - struct ath_atx_tid *tid;
  666. - struct ath_node *an;
  667. - struct ath_txq *txq;
  668. -
  669. - ath_dbg(common, XMIT, "%s called\n", __func__);
  670. -
  671. - an = (struct ath_node *)sta->drv_priv;
  672. - tid = ATH_AN_2_TID(an, tidno);
  673. - txq = tid->txq;
  674. -
  675. - ath_txq_lock(sc, txq);
  676. -
  677. - tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
  678. -
  679. - if (ath_tid_has_buffered(tid)) {
  680. - ath_tx_queue_tid(sc, txq, tid);
  681. - ath_txq_schedule(sc, txq);
  682. - }
  683. -
  684. - ath_txq_unlock_complete(sc, txq);
  685. -}
  686. -
  687. void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
  688. struct ieee80211_sta *sta,
  689. u16 tids, int nframes,
  690. @@ -1651,7 +1632,6 @@ void ath9k_release_buffered_frames(struc
  691. struct ieee80211_tx_info *info;
  692. struct list_head bf_q;
  693. struct ath_buf *bf_tail = NULL, *bf;
  694. - struct sk_buff_head *tid_q;
  695. int sent = 0;
  696. int i;
  697. @@ -1666,11 +1646,10 @@ void ath9k_release_buffered_frames(struc
  698. ath_txq_lock(sc, tid->txq);
  699. while (nframes > 0) {
  700. - bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
  701. + bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
  702. if (!bf)
  703. break;
  704. - __skb_unlink(bf->bf_mpdu, tid_q);
  705. list_add_tail(&bf->list, &bf_q);
  706. ath_set_rates(tid->an->vif, tid->an->sta, bf);
  707. if (bf_isampdu(bf)) {
  708. @@ -1685,7 +1664,7 @@ void ath9k_release_buffered_frames(struc
  709. sent++;
  710. TX_STAT_INC(txq->axq_qnum, a_queued_hw);
  711. - if (an->sta && !ath_tid_has_buffered(tid))
  712. + if (an->sta && skb_queue_empty(&tid->retry_q))
  713. ieee80211_sta_set_buffered(an->sta, i, false);
  714. }
  715. ath_txq_unlock_complete(sc, tid->txq);
  716. @@ -1914,13 +1893,7 @@ bool ath_drain_all_txq(struct ath_softc
  717. if (!ATH_TXQ_SETUP(sc, i))
  718. continue;
  719. - /*
  720. - * The caller will resume queues with ieee80211_wake_queues.
  721. - * Mark the queue as not stopped to prevent ath_tx_complete
  722. - * from waking the queue too early.
  723. - */
  724. txq = &sc->tx.txq[i];
  725. - txq->stopped = false;
  726. ath_draintxq(sc, txq);
  727. }
  728. @@ -2319,16 +2292,14 @@ int ath_tx_start(struct ieee80211_hw *hw
  729. struct ath_softc *sc = hw->priv;
  730. struct ath_txq *txq = txctl->txq;
  731. struct ath_atx_tid *tid = NULL;
  732. + struct ath_node *an = NULL;
  733. struct ath_buf *bf;
  734. - bool queue, skip_uapsd = false, ps_resp;
  735. + bool ps_resp;
  736. int q, ret;
  737. if (vif)
  738. avp = (void *)vif->drv_priv;
  739. - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
  740. - txctl->force_channel = true;
  741. -
  742. ps_resp = !!(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE);
  743. ret = ath_tx_prepare(hw, skb, txctl);
  744. @@ -2343,63 +2314,18 @@ int ath_tx_start(struct ieee80211_hw *hw
  745. q = skb_get_queue_mapping(skb);
  746. - ath_txq_lock(sc, txq);
  747. - if (txq == sc->tx.txq_map[q]) {
  748. - fi->txq = q;
  749. - if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
  750. - !txq->stopped) {
  751. - if (ath9k_is_chanctx_enabled())
  752. - ieee80211_stop_queue(sc->hw, info->hw_queue);
  753. - else
  754. - ieee80211_stop_queue(sc->hw, q);
  755. - txq->stopped = true;
  756. - }
  757. - }
  758. -
  759. - queue = ieee80211_is_data_present(hdr->frame_control);
  760. -
  761. - /* If chanctx, queue all null frames while NOA could be there */
  762. - if (ath9k_is_chanctx_enabled() &&
  763. - ieee80211_is_nullfunc(hdr->frame_control) &&
  764. - !txctl->force_channel)
  765. - queue = true;
  766. -
  767. - /* Force queueing of all frames that belong to a virtual interface on
  768. - * a different channel context, to ensure that they are sent on the
  769. - * correct channel.
  770. - */
  771. - if (((avp && avp->chanctx != sc->cur_chan) ||
  772. - sc->cur_chan->stopped) && !txctl->force_channel) {
  773. - if (!txctl->an)
  774. - txctl->an = &avp->mcast_node;
  775. - queue = true;
  776. - skip_uapsd = true;
  777. - }
  778. -
  779. - if (txctl->an && queue)
  780. - tid = ath_get_skb_tid(sc, txctl->an, skb);
  781. -
  782. - if (!skip_uapsd && ps_resp) {
  783. - ath_txq_unlock(sc, txq);
  784. + if (ps_resp)
  785. txq = sc->tx.uapsdq;
  786. - ath_txq_lock(sc, txq);
  787. - } else if (txctl->an && queue) {
  788. - WARN_ON(tid->txq != txctl->txq);
  789. -
  790. - if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
  791. - tid->clear_ps_filter = true;
  792. - /*
  793. - * Add this frame to software queue for scheduling later
  794. - * for aggregation.
  795. - */
  796. - TX_STAT_INC(txq->axq_qnum, a_queued_sw);
  797. - __skb_queue_tail(&tid->buf_q, skb);
  798. - if (!txctl->an->sleeping)
  799. - ath_tx_queue_tid(sc, txq, tid);
  800. + if (txctl->sta) {
  801. + an = (struct ath_node *) sta->drv_priv;
  802. + tid = ath_get_skb_tid(sc, an, skb);
  803. + }
  804. - ath_txq_schedule(sc, txq);
  805. - goto out;
  806. + ath_txq_lock(sc, txq);
  807. + if (txq == sc->tx.txq_map[q]) {
  808. + fi->txq = q;
  809. + ++txq->pending_frames;
  810. }
  811. bf = ath_tx_setup_buffer(sc, txq, tid, skb);
  812. @@ -2892,9 +2818,8 @@ void ath_tx_node_init(struct ath_softc *
  813. struct ath_atx_tid *tid;
  814. int tidno, acno;
  815. - for (tidno = 0, tid = &an->tid[tidno];
  816. - tidno < IEEE80211_NUM_TIDS;
  817. - tidno++, tid++) {
  818. + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
  819. + tid = ath_node_to_tid(an, tidno);
  820. tid->an = an;
  821. tid->tidno = tidno;
  822. tid->seq_start = tid->seq_next = 0;
  823. @@ -2902,11 +2827,14 @@ void ath_tx_node_init(struct ath_softc *
  824. tid->baw_head = tid->baw_tail = 0;
  825. tid->active = false;
  826. tid->clear_ps_filter = true;
  827. - __skb_queue_head_init(&tid->buf_q);
  828. + tid->has_queued = false;
  829. __skb_queue_head_init(&tid->retry_q);
  830. INIT_LIST_HEAD(&tid->list);
  831. acno = TID_TO_WME_AC(tidno);
  832. tid->txq = sc->tx.txq_map[acno];
  833. +
  834. + if (!an->sta)
  835. + break; /* just one multicast ath_atx_tid */
  836. }
  837. }
  838. @@ -2916,9 +2844,8 @@ void ath_tx_node_cleanup(struct ath_soft
  839. struct ath_txq *txq;
  840. int tidno;
  841. - for (tidno = 0, tid = &an->tid[tidno];
  842. - tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
  843. -
  844. + for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
  845. + tid = ath_node_to_tid(an, tidno);
  846. txq = tid->txq;
  847. ath_txq_lock(sc, txq);
  848. @@ -2930,6 +2857,9 @@ void ath_tx_node_cleanup(struct ath_soft
  849. tid->active = false;
  850. ath_txq_unlock(sc, txq);
  851. +
  852. + if (!an->sta)
  853. + break; /* just one multicast ath_atx_tid */
  854. }
  855. }