/*
 * Copyright (c) 2013 Qualcomm Atheros, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted (subject to the limitations in the
 * disclaimer below) provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the
 *   distribution.
 *
 * * Neither the name of Qualcomm Atheros nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <adf_os_types.h>
#include <adf_os_dma.h>
#include <adf_os_timer.h>
#include <adf_os_lock.h>
#include <adf_os_io.h>
#include <adf_os_mem.h>
#include <adf_os_util.h>
#include <adf_os_stdtypes.h>
#include <adf_os_defer.h>
#include <adf_os_atomic.h>
#include <adf_nbuf.h>
#include <adf_net.h>
#include <adf_net_wcmd.h>

#include "if_llc.h"

#ifdef USE_HEADERLEN_RESV
#include <if_llc.h>
#endif

#include <ieee80211_var.h>
#include "if_athrate.h"
#include "if_athvar.h"
#include "ah_desc.h"

#define ath_tgt_free_skb  adf_nbuf_free

#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)            /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
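/*
 * Data bits carried per OFDM symbol, indexed by HT MCS (0-15) and
 * channel width (column 0 = 20MHz, column 1 = 40MHz). Consumed by
 * ath_pkt_duration() below to estimate HT frame airtime.
 */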
static a_uint16_t bits_per_symbol[][2] = {
    /* 20MHz 40MHz */
    {  26,   54 },  /*  0: BPSK */
    {  52,  108 },  /*  1: QPSK 1/2 */
    {  78,  162 },  /*  2: QPSK 3/4 */
    { 104,  216 },  /*  3: 16-QAM 1/2 */
    { 156,  324 },  /*  4: 16-QAM 3/4 */
    { 208,  432 },  /*  5: 64-QAM 2/3 */
    { 234,  486 },  /*  6: 64-QAM 3/4 */
    { 260,  540 },  /*  7: 64-QAM 5/6 */
    {  52,  108 },  /*  8: BPSK */
    { 104,  216 },  /*  9: QPSK 1/2 */
    { 156,  324 },  /* 10: QPSK 3/4 */
    { 208,  432 },  /* 11: 16-QAM 1/2 */
    { 312,  648 },  /* 12: 16-QAM 3/4 */
    { 416,  864 },  /* 13: 64-QAM 2/3 */
    { 468,  972 },  /* 14: 64-QAM 3/4 */
    { 520, 1080 },  /* 15: 64-QAM 5/6 */
};
void owltgt_tx_processq(struct ath_softc_tgt *sc, struct ath_txq *txq,
                        owl_txq_state_t txqstate);
static void ath_tgt_txqaddbuf(struct ath_softc_tgt *sc, struct ath_txq *txq,
                              struct ath_tx_buf *bf, struct ath_tx_desc *lastds);
void ath_rate_findrate_11n_Hardcoded(struct ath_softc_tgt *sc,
                                     struct ath_rc_series series[]);
void ath_buf_set_rate_Hardcoded(struct ath_softc_tgt *sc,
                                struct ath_tx_buf *bf);
static a_int32_t ath_tgt_txbuf_setup(struct ath_softc_tgt *sc,
                                     struct ath_tx_buf *bf, ath_data_hdr_t *dh);
static void ath_tx_freebuf(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
static void ath_tx_uc_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
static void ath_update_stats(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
void adf_print_buf(adf_nbuf_t buf);
static void ath_tgt_tx_enqueue(struct ath_txq *txq, struct ath_atx_tid *tid);
void ath_tgt_tx_comp_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
struct ieee80211_frame *ATH_SKB_2_WH(adf_nbuf_t skb);
void ath_tgt_tx_send_normal(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
static void ath_tgt_tx_sched_normal(struct ath_softc_tgt *sc, ath_atx_tid_t *tid);
static void ath_tgt_tx_sched_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid);
extern a_int32_t ath_chainmask_sel_logic(void *);
static a_int32_t ath_get_pktlen(struct ath_tx_buf *bf, a_int32_t hdrlen);
static void ath_tgt_txq_schedule(struct ath_softc_tgt *sc, struct ath_txq *txq);

typedef void (*ath_ft_set_atype_t)(struct ath_softc_tgt *sc, struct ath_buf *bf);

static void
ath_tx_set_retry(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
static void
ath_bar_tx(struct ath_softc_tgt *sc, ath_atx_tid_t *tid, struct ath_tx_buf *bf);
static void
ath_tx_update_baw(ath_atx_tid_t *tid, int seqno);
static void
ath_tx_retry_subframe(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
                      ath_tx_bufhead *bf_q, struct ath_tx_buf **bar);
static void
ath_tx_comp_aggr_error(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
                       ath_atx_tid_t *tid);
void ath_tx_addto_baw(ath_atx_tid_t *tid, struct ath_tx_buf *bf);
static inline void ath_tx_retry_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
static void ath_tx_comp_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
static void ath_update_aggr_stats(struct ath_softc_tgt *sc, struct ath_tx_desc *ds,
                                  int nframes, int nbad);
static inline void ath_aggr_resume_tid(struct ath_softc_tgt *sc, ath_atx_tid_t *tid);
static void ath_tx_comp_cleanup(struct ath_softc_tgt *sc, struct ath_tx_buf *bf);
int ath_tgt_tx_add_to_aggr(struct ath_softc_tgt *sc,
                           struct ath_buf *bf, int datatype,
                           ath_atx_tid_t *tid, int is_burst);
struct ieee80211_frame *ATH_SKB_2_WH(adf_nbuf_t skb)
{
    a_uint8_t *anbdata;
    a_uint32_t anblen;

    adf_nbuf_peek_header(skb, &anbdata, &anblen);

    return ((struct ieee80211_frame *)anbdata);
}

#undef adf_os_cpu_to_le16
static a_uint16_t adf_os_cpu_to_le16(a_uint16_t x)
{
    return ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8));
}

static inline void
ath_aggr_resume_tid(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
{
    struct ath_txq *txq;

    txq = TID_TO_ACTXQ(tid->tidno);

    tid->paused = 0;

    if (asf_tailq_empty(&tid->buf_q))
        return;

    ath_tgt_tx_enqueue(txq, tid);
    ath_tgt_txq_schedule(sc, txq);
}

static inline void
ath_aggr_pause_tid(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
{
    tid->paused = 1;
}
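/*
 * Estimate the on-air duration of a frame sent at the rate given by
 * 'rix'. Legacy rates are delegated to the HAL; for HT rates the
 * duration is computed from the symbol count (full or half GI) plus
 * the legacy and HT preamble fields (L-STF/L-LTF/L-SIG/HT-SIG/HT-STF
 * and one HT-LTF per spatial stream).
 */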
static a_uint32_t ath_pkt_duration(struct ath_softc_tgt *sc,
                                   a_uint8_t rix, struct ath_tx_buf *bf,
                                   a_int32_t width, a_int32_t half_gi)
{
    const HAL_RATE_TABLE *rt = sc->sc_currates;
    a_uint32_t nbits, nsymbits, duration, nsymbols;
    a_uint8_t rc;
    a_int32_t streams;
    a_int32_t pktlen;

    pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_pktlen;
    rc = rt->info[rix].rateCode;

    if (!IS_HT_RATE(rc))
        return ath_hal_computetxtime(sc->sc_ah, rt, pktlen, rix,
                                     bf->bf_shpream);

    nbits = (pktlen << 3) + OFDM_PLCP_BITS;
    nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
    nsymbols = (nbits + nsymbits - 1) / nsymbits;

    if (!half_gi)
        duration = SYMBOL_TIME(nsymbols);
    else
        duration = SYMBOL_TIME_HALFGI(nsymbols);

    streams = HT_RC_2_STREAMS(rc);
    duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

    return duration;
}
static void ath_dma_map(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    adf_nbuf_t skb = bf->bf_skb;

    skb = adf_nbuf_queue_first(&bf->bf_skbhead);
    adf_nbuf_map(sc->sc_dev, bf->bf_dmamap, skb, ADF_OS_DMA_TO_DEVICE);
}

static void ath_dma_unmap(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    adf_nbuf_t skb = bf->bf_skb;

    skb = adf_nbuf_queue_first(&bf->bf_skbhead);
    adf_nbuf_unmap(sc->sc_dev, bf->bf_dmamap, ADF_OS_DMA_TO_DEVICE);
}
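/*
 * Program one descriptor per DMA segment of the buffer and chain them
 * through ds_link; the final descriptor terminates the chain and is
 * remembered in bf_lastds.
 */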
static void ath_filltxdesc(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_tx_desc *ds0, *ds = bf->bf_desc;
    struct ath_hal *ah = sc->sc_ah;
    a_uint8_t i;

    ds0 = ds;
    adf_nbuf_dmamap_info(bf->bf_dmamap, &bf->bf_dmamap_info);

    for (i = 0; i < bf->bf_dmamap_info.nsegs; i++, ds++) {

        ds->ds_data = bf->bf_dmamap_info.dma_segs[i].paddr;

        if (i == (bf->bf_dmamap_info.nsegs - 1)) {
            ds->ds_link = 0;
            bf->bf_lastds = ds;
        } else
            ds->ds_link = ATH_BUF_GET_DESC_PHY_ADDR_WITH_IDX(bf, i+1);

        ah->ah_fillTxDesc(ds
                          , bf->bf_dmamap_info.dma_segs[i].len
                          , i == 0
                          , i == (bf->bf_dmamap_info.nsegs - 1)
                          , ds0);
    }
}
static void ath_tx_tgt_setds(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_tx_desc *ds = bf->bf_desc;
    struct ath_hal *ah = sc->sc_ah;

    switch (bf->bf_protmode) {
    case IEEE80211_PROT_RTSCTS:
        bf->bf_flags |= HAL_TXDESC_RTSENA;
        break;
    case IEEE80211_PROT_CTSONLY:
        bf->bf_flags |= HAL_TXDESC_CTSENA;
        break;
    default:
        break;
    }

    ah->ah_set11nTxDesc(ds
                        , bf->bf_pktlen
                        , bf->bf_atype
                        , 60
                        , bf->bf_keyix
                        , bf->bf_keytype
                        , bf->bf_flags | HAL_TXDESC_INTREQ);

    ath_filltxdesc(sc, bf);
}
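/*
 * Swap 'bf' with the spare buffer held in sc_txbuf_held. On a retry,
 * the skb and all transmit state migrate to the spare, which is
 * remapped for DMA and returned; 'bf' itself becomes the new held
 * spare. Presumably this lets a retried frame be re-queued on fresh
 * descriptors while the old ones are still being reaped.
 */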
static struct ath_tx_buf *ath_buf_toggle(struct ath_softc_tgt *sc,
                                         struct ath_tx_buf *bf,
                                         a_uint8_t retry)
{
    struct ath_tx_buf *tmp = NULL;
    adf_nbuf_t buf = NULL;

    adf_os_assert(sc->sc_txbuf_held != NULL);

    tmp = sc->sc_txbuf_held;

    if (retry) {
        ath_dma_unmap(sc, bf);
        adf_nbuf_queue_init(&tmp->bf_skbhead);
        buf = adf_nbuf_queue_remove(&bf->bf_skbhead);
        adf_os_assert(buf);
        adf_nbuf_queue_add(&tmp->bf_skbhead, buf);

        adf_os_assert(adf_nbuf_queue_len(&bf->bf_skbhead) == 0);

        tmp->bf_next = bf->bf_next;
        tmp->bf_endpt = bf->bf_endpt;
        tmp->bf_tidno = bf->bf_tidno;
        tmp->bf_skb = bf->bf_skb;
        tmp->bf_node = bf->bf_node;
        tmp->bf_isaggr = bf->bf_isaggr;
        tmp->bf_flags = bf->bf_flags;
        tmp->bf_state = bf->bf_state;
        tmp->bf_retries = bf->bf_retries;
        tmp->bf_comp = bf->bf_comp;
        tmp->bf_nframes = bf->bf_nframes;
        tmp->bf_cookie = bf->bf_cookie;

        bf->bf_isaggr = 0;
        bf->bf_next = NULL;
        bf->bf_skb = NULL;
        bf->bf_node = NULL;
        bf->bf_flags = 0;
        bf->bf_comp = NULL;
        bf->bf_retries = 0;
        bf->bf_nframes = 0;

        ath_dma_map(sc, tmp);
        ath_tx_tgt_setds(sc, tmp);
    }

    sc->sc_txbuf_held = bf;

    return tmp;
}
static void ath_tgt_skb_free(struct ath_softc_tgt *sc,
                             adf_nbuf_queue_t *head,
                             HTC_ENDPOINT_ID endpt)
{
    adf_nbuf_t tskb;

    while (adf_nbuf_queue_len(head) != 0) {
        tskb = adf_nbuf_queue_remove(head);
        ath_free_tx_skb(sc->tgt_htc_handle, endpt, tskb);
    }
}

static void ath_buf_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    ath_dma_unmap(sc, bf);
    ath_tgt_skb_free(sc, &bf->bf_skbhead, bf->bf_endpt);
    bf->bf_skb = NULL;
    bf->bf_node = NULL;
    bf = ath_buf_toggle(sc, bf, 0);

    if (bf != NULL) {
        asf_tailq_insert_tail(&sc->sc_txbuf, bf, bf_list);
    }
}
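/*
 * Build the 4-entry HAL rate series from bf_rcs[]: rate code, try
 * counts, 2040/half-GI (and, on Merlin, STBC) flags and the estimated
 * per-rate packet duration, then pick an RTS/CTS rate and hand the
 * whole scenario to the HAL.
 */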
static void ath_buf_set_rate(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_hal *ah = sc->sc_ah;
    const HAL_RATE_TABLE *rt;
    struct ath_tx_desc *ds = bf->bf_desc;
    HAL_11N_RATE_SERIES series[4];
    a_int32_t i, flags;
    a_uint8_t rix, cix, rtsctsrate;
    a_int32_t prot_mode = AH_FALSE;

    rt = sc->sc_currates;
    rix = bf->bf_rcs[0].rix;
    flags = (bf->bf_flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA));
    cix = rt->info[sc->sc_protrix].controlRate;

    if (bf->bf_protmode != IEEE80211_PROT_NONE &&
        (rt->info[rix].phy == IEEE80211_T_OFDM ||
         rt->info[rix].phy == IEEE80211_T_HT) &&
        (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
        cix = rt->info[sc->sc_protrix].controlRate;
        prot_mode = AH_TRUE;
    } else {
        if (ath_hal_getcapability(ah, HAL_CAP_HT) && (!bf->bf_ismcast))
            flags = HAL_TXDESC_RTSENA;

        for (i = 4; i--;) {
            if (bf->bf_rcs[i].tries) {
                cix = rt->info[bf->bf_rcs[i].rix].controlRate;
                break;
            }
        }
    }

    adf_os_mem_set(series, 0, sizeof(HAL_11N_RATE_SERIES) * 4);

    for (i = 0; i < 4; i++) {
        if (!bf->bf_rcs[i].tries)
            continue;

        rix = bf->bf_rcs[i].rix;

        series[i].Rate = rt->info[rix].rateCode |
            (bf->bf_shpream ? rt->info[rix].shortPreamble : 0);

        series[i].Tries = bf->bf_rcs[i].tries;
#ifdef MAGPIE_MERLIN
        series[i].RateFlags = ((bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
                               HAL_RATESERIES_RTS_CTS : 0) |
                              ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
                               HAL_RATESERIES_2040 : 0) |
                              ((bf->bf_rcs[i].flags & ATH_RC_HT40_SGI_FLAG) ?
                               HAL_RATESERIES_HALFGI : 0) |
                              ((bf->bf_rcs[i].flags & ATH_RC_TX_STBC_FLAG) ?
                               HAL_RATESERIES_STBC : 0);
#else
        series[i].RateFlags = ((bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
                               HAL_RATESERIES_RTS_CTS : 0) |
                              ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
                               HAL_RATESERIES_2040 : 0) |
                              ((bf->bf_rcs[i].flags & ATH_RC_HT40_SGI_FLAG) ?
                               HAL_RATESERIES_HALFGI : 0);
#endif
        series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
                                                 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
                                                 (bf->bf_rcs[i].flags & ATH_RC_HT40_SGI_FLAG));

        series[i].ChSel = sc->sc_ic.ic_tx_chainmask;

        if (prot_mode)
            series[i].RateFlags |= HAL_RATESERIES_RTS_CTS;

        if (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG)
            series[i].RateFlags |= HAL_RATESERIES_RTS_CTS;
    }

    rtsctsrate = rt->info[cix].rateCode |
        (bf->bf_shpream ? rt->info[cix].shortPreamble : 0);

    ah->ah_set11nRateScenario(ds, 1,
                              rtsctsrate,
                              series, 4,
                              flags);
}
static void ath_tgt_rate_findrate(struct ath_softc_tgt *sc,
                                  struct ath_node_target *an,
                                  a_int32_t shortPreamble,
                                  size_t frameLen,
                                  a_int32_t numTries,
                                  a_int32_t numRates,
                                  a_int32_t stepDnInc,
                                  a_uint32_t rcflag,
                                  struct ath_rc_series series[],
                                  a_int32_t *isProbe)
{
    ath_rate_findrate(sc, an, 1, frameLen, 10, 4, 1,
                      ATH_RC_PROBE_ALLOWED, series, isProbe);
}

static void owl_tgt_tid_init(struct ath_atx_tid *tid)
{
    int i;

    tid->seq_start = tid->seq_next = 0;
    tid->baw_size = WME_MAX_BA;
    tid->baw_head = tid->baw_tail = 0;
    tid->paused = 0;
    tid->flag = 0;
    tid->sched = AH_FALSE;

    asf_tailq_init(&tid->buf_q);

    for (i = 0; i < ATH_TID_MAX_BUFS; i++) {
        TX_BUF_BITMAP_CLR(tid->tx_buf_bitmap, i);
    }
}
static void owl_tgt_tid_cleanup(struct ath_softc_tgt *sc,
                                struct ath_atx_tid *tid)
{
    tid->incomp--;

    if (tid->incomp) {
        return;
    }

    tid->flag &= ~TID_CLEANUP_INPROGRES;

    if (tid->flag & TID_REINITIALIZE) {
        adf_os_print("TID REINIT DONE for tid %p\n", tid);
        tid->flag &= ~TID_REINITIALIZE;
        owl_tgt_tid_init(tid);
    } else {
        ath_aggr_resume_tid(sc, tid);
    }
}

void owl_tgt_node_init(struct ath_node_target *an)
{
    struct ath_atx_tid *tid;
    int tidno;

    for (tidno = 0, tid = &an->tid[tidno]; tidno < WME_NUM_TID;
         tidno++, tid++) {

        tid->tidno = tidno;
        tid->an = an;

        if (tid->flag & TID_CLEANUP_INPROGRES) {
            tid->flag |= TID_REINITIALIZE;
            adf_os_print("tid[%p]->incomp is not 0: %d\n",
                         tid, tid->incomp);
        } else {
            owl_tgt_tid_init(tid);
        }
    }
}

void ath_tx_status_clear(struct ath_softc_tgt *sc)
{
    int i;

    for (i = 0; i < 2; i++) {
        sc->tx_status[i].cnt = 0;
    }
}
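/*
 * TX status reports are batched into two WMI_TXSTATUS_EVENT buffers,
 * each holding up to HTC_MAX_TX_STATUS per-frame entries; they are
 * flushed to the host by ath_tx_status_send() at the end of the tx
 * tasklet.
 */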
static WMI_TXSTATUS_EVENT *ath_tx_status_get(struct ath_softc_tgt *sc)
{
    WMI_TXSTATUS_EVENT *txs = NULL;
    int i;

    for (i = 0; i < 2; i++) {
        if (sc->tx_status[i].cnt < HTC_MAX_TX_STATUS) {
            txs = &sc->tx_status[i];
            break;
        }
    }

    return txs;
}

void ath_tx_status_update(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_tx_desc *ds = bf->bf_lastds;
    WMI_TXSTATUS_EVENT *txs;

    if (sc->sc_tx_draining)
        return;

    txs = ath_tx_status_get(sc);
    if (txs == NULL)
        return;

    txs->txstatus[txs->cnt].ts_flags = 0;

    txs->txstatus[txs->cnt].cookie = bf->bf_cookie;
    txs->txstatus[txs->cnt].ts_rate = SM(bf->bf_endpt, ATH9K_HTC_TXSTAT_EPID);

    if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
        txs->txstatus[txs->cnt].ts_flags |= ATH9K_HTC_TXSTAT_FILT;

    if (!(ds->ds_txstat.ts_status & HAL_TXERR_XRETRY) &&
        !(ds->ds_txstat.ts_status & HAL_TXERR_FIFO) &&
        !(ds->ds_txstat.ts_status & HAL_TXERR_TIMER_EXPIRED) &&
        !(ds->ds_txstat.ts_status & HAL_TXERR_FILT))
        txs->txstatus[txs->cnt].ts_flags |= ATH9K_HTC_TXSTAT_ACK;

    ath_tx_status_update_rate(sc, bf->bf_rcs, ds->ds_txstat.ts_rate, txs);

    txs->cnt++;
}

void ath_tx_status_update_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
                               struct ath_tx_desc *ds, struct ath_rc_series rcs[],
                               int txok)
{
    WMI_TXSTATUS_EVENT *txs;

    if (sc->sc_tx_draining)
        return;

    txs = ath_tx_status_get(sc);
    if (txs == NULL)
        return;

    txs->txstatus[txs->cnt].cookie = bf->bf_cookie;
    txs->txstatus[txs->cnt].ts_rate = SM(bf->bf_endpt, ATH9K_HTC_TXSTAT_EPID);

    if (txok)
        txs->txstatus[txs->cnt].ts_flags |= ATH9K_HTC_TXSTAT_ACK;

    if (rcs)
        ath_tx_status_update_rate(sc, rcs, ds->ds_txstat.ts_rate, txs);

    txs->cnt++;
}

void ath_tx_status_send(struct ath_softc_tgt *sc)
{
    int i;

    if (sc->sc_tx_draining)
        return;

    for (i = 0; i < 2; i++) {
        if (sc->tx_status[i].cnt) {
            wmi_event(sc->tgt_wmi_handle, WMI_TXSTATUS_EVENTID,
                      &sc->tx_status[i], sizeof(WMI_TXSTATUS_EVENT));
            /* FIXME: Handle failures. */
            sc->tx_status[i].cnt = 0;
        }
    }
}
static void owltgt_tx_process_cabq(struct ath_softc_tgt *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;

    ah->ah_setInterrupts(ah, sc->sc_imask & ~HAL_INT_SWBA);
    owltgt_tx_processq(sc, txq, OWL_TXQ_ACTIVE);
    ah->ah_setInterrupts(ah, sc->sc_imask);
}

void owl_tgt_tx_tasklet(TQUEUE_ARG data)
{
    struct ath_softc_tgt *sc = (struct ath_softc_tgt *)data;
    a_int32_t i;
    struct ath_txq *txq;

    ath_tx_status_clear(sc);

    for (i = 0; i < (HAL_NUM_TX_QUEUES - 6); i++) {

        txq = ATH_TXQ(sc, i);

        if (ATH_TXQ_SETUP(sc, i)) {
            if (txq == sc->sc_cabq)
                owltgt_tx_process_cabq(sc, txq);
            else
                owltgt_tx_processq(sc, txq, OWL_TXQ_ACTIVE);
        }
    }

    ath_tx_status_send(sc);
}
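/*
 * Reap completed frames from the head of a hardware queue. A
 * descriptor still marked HAL_EINPROGRESS ends the scan on an active
 * queue; on a stopped queue it is counted as stop-filtered and
 * completed as if OK, and in any other queue state it is marked
 * software-filtered so the buffer can still be recycled.
 */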
void owltgt_tx_processq(struct ath_softc_tgt *sc, struct ath_txq *txq,
                        owl_txq_state_t txqstate)
{
    struct ath_tx_buf *bf;
    struct ath_tx_desc *ds;
    struct ath_hal *ah = sc->sc_ah;
    HAL_STATUS status;

    for (;;) {
        if (asf_tailq_empty(&txq->axq_q)) {
            txq->axq_link = NULL;
            txq->axq_linkbuf = NULL;
            break;
        }

        bf = asf_tailq_first(&txq->axq_q);

        ds = bf->bf_lastds;
        status = ah->ah_procTxDesc(ah, ds);

        if (status == HAL_EINPROGRESS) {
            if (txqstate == OWL_TXQ_ACTIVE)
                break;
            else if (txqstate == OWL_TXQ_STOPPED) {
                __stats(sc, tx_stopfiltered);
                ds->ds_txstat.ts_flags = 0;
                ds->ds_txstat.ts_status = HAL_OK;
            } else {
                ds->ds_txstat.ts_flags = HAL_TX_SW_FILTERED;
            }
        }

        ATH_TXQ_REMOVE_HEAD(txq, bf, bf_list);

        if ((asf_tailq_empty(&txq->axq_q))) {
            __stats(sc, tx_qnull);
            txq->axq_link = NULL;
            txq->axq_linkbuf = NULL;
        }

        if (bf->bf_comp) {
            bf->bf_comp(sc, bf);
        } else {
            ath_tx_status_update(sc, bf);
            ath_buf_comp(sc, bf);
        }

        if (txqstate == OWL_TXQ_ACTIVE) {
            ath_tgt_txq_schedule(sc, txq);
        }
    }
}
static struct ieee80211_frame *ATH_SKB2_WH(adf_nbuf_t skb)
{
    a_uint8_t *anbdata;
    a_uint32_t anblen;

    adf_nbuf_peek_header(skb, &anbdata, &anblen);

    return ((struct ieee80211_frame *)anbdata);
}

void
ath_tgt_tid_drain(struct ath_softc_tgt *sc, struct ath_atx_tid *tid)
{
    struct ath_tx_buf *bf;

    while (!asf_tailq_empty(&tid->buf_q)) {
        TAILQ_DEQ(&tid->buf_q, bf, bf_list);
        ath_tx_freebuf(sc, bf);
    }

    tid->seq_next = tid->seq_start;
    tid->baw_tail = tid->baw_head;
}
static void ath_tgt_tx_comp_normal(struct ath_softc_tgt *sc,
                                   struct ath_tx_buf *bf)
{
    struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
    ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);

    if (tid->flag & TID_CLEANUP_INPROGRES) {
        owl_tgt_tid_cleanup(sc, tid);
    }

    if (bf->bf_node) {
        ath_tx_uc_comp(sc, bf);
    }

    ath_tx_freebuf(sc, bf);
}

static struct ieee80211_node_target *ath_tgt_find_node(struct ath_softc_tgt *sc,
                                                       a_int32_t node_index)
{
    struct ath_node_target *an;
    struct ieee80211_node_target *ni;

    if (node_index > TARGET_NODE_MAX)
        return NULL;

    an = &sc->sc_sta[node_index];
    ni = &an->ni;

    if (an->an_valid) {
        if (ni->ni_vap == NULL) {
            return NULL;
        }
        return ni;
    }

    return NULL;
}

static struct ath_tx_buf *ath_tx_buf_alloc(struct ath_softc_tgt *sc)
{
    struct ath_tx_buf *bf = NULL;

    bf = asf_tailq_first(&sc->sc_txbuf);
    if (bf != NULL) {
        adf_os_mem_set(&bf->bf_state, 0, sizeof(struct ath_buf_state));
        asf_tailq_remove(&sc->sc_txbuf, bf, bf_list);
    } else {
        adf_os_assert(0);
    }

    return bf;
}
struct ath_tx_buf *ath_tgt_tx_prepare(struct ath_softc_tgt *sc,
                                      adf_nbuf_t skb, ath_data_hdr_t *dh)
{
    struct ath_tx_buf *bf;
    struct ieee80211_node_target *ni;
    struct ath_atx_tid *tid;

    ni = ath_tgt_find_node(sc, dh->ni_index);
    if (ni == NULL)
        return NULL;

    tid = ATH_AN_2_TID(ATH_NODE_TARGET(ni), dh->tidno);
    if (tid->flag & TID_REINITIALIZE) {
        adf_os_print("drop frame due to TID reinit\n");
        return NULL;
    }

    bf = ath_tx_buf_alloc(sc);
    if (!bf) {
        __stats(sc, tx_nobufs);
        return NULL;
    }

    bf->bf_tidno = dh->tidno;
    bf->bf_txq = TID_TO_ACTXQ(bf->bf_tidno);
    bf->bf_keytype = dh->keytype;
    bf->bf_keyix = dh->keyix;
    bf->bf_protmode = dh->flags & (IEEE80211_PROT_RTSCTS | IEEE80211_PROT_CTSONLY);
    bf->bf_node = ni;

    adf_nbuf_queue_add(&bf->bf_skbhead, skb);
    skb = adf_nbuf_queue_first(&(bf->bf_skbhead));

    if (adf_nbuf_queue_len(&(bf->bf_skbhead)) == 0) {
        __stats(sc, tx_noskbs);
        return NULL;
    }

    adf_os_assert(skb);

    bf->bf_skb = skb;

    ath_tgt_txbuf_setup(sc, bf, dh);
    ath_dma_map(sc, bf);
    ath_tx_tgt_setds(sc, bf);

    return bf;
}
static void ath_tgt_tx_seqno_normal(struct ath_tx_buf *bf)
{
    struct ieee80211_node_target *ni = bf->bf_node;
    struct ath_node_target *an = ATH_NODE_TARGET(ni);
    struct ieee80211_frame *wh = ATH_SKB_2_WH(bf->bf_skb);
    struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
    u_int8_t fragno = (wh->i_seq[0] & 0xf);

    INCR(ni->ni_txseqmgmt, IEEE80211_SEQ_MAX);

    bf->bf_seqno = (tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);

    *(u_int16_t *)wh->i_seq = adf_os_cpu_to_le16(bf->bf_seqno);
    wh->i_seq[0] |= fragno;

    if (!(wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG))
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
}

static a_int32_t ath_key_setup(struct ieee80211_node_target *ni,
                               struct ath_tx_buf *bf)
{
    struct ieee80211_frame *wh = ATH_SKB_2_WH(bf->bf_skb);

    if (!(wh->i_fc[1] & IEEE80211_FC1_WEP)) {
        bf->bf_keytype = HAL_KEY_TYPE_CLEAR;
        bf->bf_keyix = HAL_TXKEYIX_INVALID;
        return 0;
    }

    switch (bf->bf_keytype) {
    case HAL_KEY_TYPE_WEP:
        bf->bf_pktlen += IEEE80211_WEP_ICVLEN;
        break;
    case HAL_KEY_TYPE_AES:
        bf->bf_pktlen += IEEE80211_WEP_MICLEN;
        break;
    case HAL_KEY_TYPE_TKIP:
        bf->bf_pktlen += IEEE80211_WEP_ICVLEN;
        break;
    default:
        break;
    }

    if (bf->bf_keytype == HAL_KEY_TYPE_AES ||
        bf->bf_keytype == HAL_KEY_TYPE_TKIP)
        ieee80211_tgt_crypto_encap(wh, ni, bf->bf_keytype);

    return 0;
}
static void ath_tgt_txq_add_ucast(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath_txq *txq;
    HAL_STATUS status;
    volatile a_int32_t txe_val;

    adf_os_assert(bf);

    txq = bf->bf_txq;

    status = ah->ah_procTxDesc(ah, bf->bf_lastds);

    ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

    if (txq->axq_link == NULL) {
        ah->ah_setTxDP(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
    } else {
        *txq->axq_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);

        txe_val = ioread32_mac(0x0840);
        if (!(txe_val & (1 << txq->axq_qnum)))
            ah->ah_setTxDP(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
    }

    txq->axq_link = &bf->bf_lastds->ds_link;
    ah->ah_startTxDma(ah, txq->axq_qnum);
}
static a_int32_t ath_tgt_txbuf_setup(struct ath_softc_tgt *sc,
                                     struct ath_tx_buf *bf,
                                     ath_data_hdr_t *dh)
{
    struct ieee80211_frame *wh = ATH_SKB2_WH(bf->bf_skb);
    a_int32_t retval;
    a_uint32_t flags = adf_os_ntohl(dh->flags);

    ath_tgt_tx_seqno_normal(bf);

    bf->bf_txq_add = ath_tgt_txq_add_ucast;
    bf->bf_hdrlen = ieee80211_anyhdrsize(wh);
    bf->bf_pktlen = ath_get_pktlen(bf, bf->bf_hdrlen);
    bf->bf_ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);

    if ((retval = ath_key_setup(bf->bf_node, bf)) < 0)
        return retval;

    if (flags & ATH_SHORT_PREAMBLE)
        bf->bf_shpream = AH_TRUE;
    else
        bf->bf_shpream = AH_FALSE;

    bf->bf_flags = HAL_TXDESC_CLRDMASK;
    bf->bf_atype = HAL_PKT_TYPE_NORMAL;

    return 0;
}

static a_int32_t
ath_get_pktlen(struct ath_tx_buf *bf, a_int32_t hdrlen)
{
    adf_nbuf_t skb = bf->bf_skb;
    a_int32_t pktlen;

    skb = adf_nbuf_queue_first(&bf->bf_skbhead);
    pktlen = adf_nbuf_len(skb);

    pktlen -= (hdrlen & 3);
    pktlen += IEEE80211_CRC_LEN;

    return pktlen;
}
void
ath_tgt_tx_send_normal(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
    struct ath_rc_series rcs[4];
    struct ath_rc_series mrcs[4];
    a_int32_t shortPreamble = 0;
    a_int32_t isProbe = 0;

    adf_os_mem_set(rcs, 0, sizeof(struct ath_rc_series) * 4);
    adf_os_mem_set(mrcs, 0, sizeof(struct ath_rc_series) * 4);

    if (!bf->bf_ismcast) {
        ath_tgt_rate_findrate(sc, an, shortPreamble,
                              0, 0, 0, 0, 0,
                              rcs, &isProbe);
        ath_hal_memcpy(bf->bf_rcs, rcs, sizeof(rcs));
    } else {
        mrcs[1].tries = mrcs[2].tries = mrcs[3].tries = 0;
        mrcs[1].rix = mrcs[2].rix = mrcs[3].rix = 0;
        mrcs[0].rix = 0;
        mrcs[0].tries = 1;
        mrcs[0].flags = 0;
        ath_hal_memcpy(bf->bf_rcs, mrcs, sizeof(mrcs));
    }

    ath_buf_set_rate(sc, bf);
    bf->bf_txq_add(sc, bf);
}
static void
ath_tx_freebuf(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    a_int32_t i;
    struct ath_tx_desc *bfd = NULL;
    struct ath_hal *ah = sc->sc_ah;

    for (bfd = bf->bf_desc, i = 0; i < bf->bf_dmamap_info.nsegs; bfd++, i++) {
        ah->ah_clr11nAggr(bfd);
        ah->ah_set11nBurstDuration(bfd, 0);
        ah->ah_set11nVirtualMoreFrag(bfd, 0);
    }

    ath_dma_unmap(sc, bf);

    ath_tgt_skb_free(sc, &bf->bf_skbhead, bf->bf_endpt);

    bf->bf_skb = NULL;
    bf->bf_comp = NULL;
    bf->bf_node = NULL;
    bf->bf_next = NULL;
    bf = ath_buf_toggle(sc, bf, 0);
    bf->bf_retries = 0;
    bf->bf_isretried = 0;

    if (bf != NULL)
        asf_tailq_insert_tail(&sc->sc_txbuf, bf, bf_list);
}
static void
ath_tx_uc_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    ath_tx_status_update(sc, bf);
    ath_update_stats(sc, bf);
    ath_rate_tx_complete(sc, ATH_NODE_TARGET(bf->bf_node),
                         bf->bf_lastds, bf->bf_rcs, 1, 0);
}

static void
ath_update_stats(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    struct ath_tx_desc *ds = bf->bf_desc;
    u_int32_t sr, lr;

    if (ds->ds_txstat.ts_status == 0) {
        if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE)
            sc->sc_tx_stats.ast_tx_altrate++;
    } else {
        if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
            sc->sc_tx_stats.ast_tx_xretries++;
        if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
            sc->sc_tx_stats.ast_tx_fifoerr++;
        if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
            sc->sc_tx_stats.ast_tx_filtered++;
        if (ds->ds_txstat.ts_status & HAL_TXERR_TIMER_EXPIRED)
            sc->sc_tx_stats.ast_tx_timer_exp++;
    }

    sr = ds->ds_txstat.ts_shortretry;
    lr = ds->ds_txstat.ts_longretry;
    sc->sc_tx_stats.ast_tx_shortretry += sr;
    sc->sc_tx_stats.ast_tx_longretry += lr;
}
void
ath_tgt_send_mgt(struct ath_softc_tgt *sc, adf_nbuf_t hdr_buf, adf_nbuf_t skb,
                 HTC_ENDPOINT_ID endpt)
{
    struct ieee80211_node_target *ni;
    struct ieee80211vap_target *vap;
    struct ath_vap_target *avp;
    struct ath_hal *ah = sc->sc_ah;
    a_uint8_t rix, txrate, ctsrate, cix = 0xff, *data;
    a_uint32_t subtype, flags, ctsduration;
    a_int32_t i, iswep, ismcast, hdrlen, pktlen, try0, len;
    struct ath_tx_desc *ds = NULL;
    struct ath_txq *txq = NULL;
    struct ath_tx_buf *bf;
    HAL_PKT_TYPE atype;
    const HAL_RATE_TABLE *rt;
    HAL_BOOL shortPreamble;
    struct ieee80211_frame *wh;
    struct ath_rc_series rcs[4];
    HAL_11N_RATE_SERIES series[4];
    ath_mgt_hdr_t *mh;
    a_int8_t keyix;

    if (!hdr_buf) {
        adf_nbuf_peek_header(skb, &data, &len);
        adf_nbuf_pull_head(skb, sizeof(ath_mgt_hdr_t));
    } else {
        adf_nbuf_peek_header(hdr_buf, &data, &len);
    }

    adf_os_assert(len >= sizeof(ath_mgt_hdr_t));

    mh = (ath_mgt_hdr_t *)data;
    adf_nbuf_peek_header(skb, &data, &len);
    wh = (struct ieee80211_frame *)data;

    adf_os_mem_set(rcs, 0, sizeof(struct ath_rc_series) * 4);
    adf_os_mem_set(series, 0, sizeof(HAL_11N_RATE_SERIES) * 4);

    bf = asf_tailq_first(&sc->sc_txbuf);
    if (!bf)
        goto fail;

    asf_tailq_remove(&sc->sc_txbuf, bf, bf_list);

    ni = ath_tgt_find_node(sc, mh->ni_index);
    if (!ni)
        goto fail;

    bf->bf_endpt = endpt;
    bf->bf_cookie = mh->cookie;
    bf->bf_protmode = mh->flags & (IEEE80211_PROT_RTSCTS | IEEE80211_PROT_CTSONLY);
    txq = &sc->sc_txq[1];
    iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
    ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
    hdrlen = ieee80211_anyhdrsize(wh);
    pktlen = len;
    keyix = HAL_TXKEYIX_INVALID;
    pktlen -= (hdrlen & 3);
    pktlen += IEEE80211_CRC_LEN;

    if (iswep)
        keyix = mh->keyix;

    adf_nbuf_map(sc->sc_dev, bf->bf_dmamap, skb, ADF_OS_DMA_TO_DEVICE);

    bf->bf_skb = skb;
    adf_nbuf_queue_add(&bf->bf_skbhead, skb);

    ds = bf->bf_desc;
    rt = sc->sc_currates;
    adf_os_assert(rt != NULL);

    if (mh->flags == ATH_SHORT_PREAMBLE)
        shortPreamble = AH_TRUE;
    else
        shortPreamble = AH_FALSE;

    flags = HAL_TXDESC_CLRDMASK;

    switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
    case IEEE80211_FC0_TYPE_MGT:
        subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

        if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
            atype = HAL_PKT_TYPE_PROBE_RESP;
        else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
            atype = HAL_PKT_TYPE_ATIM;
        else
            atype = HAL_PKT_TYPE_NORMAL;
        break;
    default:
        atype = HAL_PKT_TYPE_NORMAL;
        break;
    }

    avp = &sc->sc_vap[mh->vap_index];

    rcs[0].rix = ath_get_minrateidx(sc, avp);
    rcs[0].tries = ATH_TXMAXTRY;
    rcs[0].flags = 0;

    adf_os_mem_copy(bf->bf_rcs, rcs, sizeof(rcs));
    rix = rcs[0].rix;
    try0 = rcs[0].tries;

    txrate = rt->info[rix].rateCode;
    if (shortPreamble) {
        txrate |= rt->info[rix].shortPreamble;
    }

    vap = ni->ni_vap;
    bf->bf_node = ni;

    if (ismcast) {
        flags |= HAL_TXDESC_NOACK;
        try0 = 1;
    } else if (pktlen > vap->iv_rtsthreshold) {
        flags |= HAL_TXDESC_RTSENA;
        cix = rt->info[rix].controlRate;
    }

    if ((bf->bf_protmode != IEEE80211_PROT_NONE) &&
        rt->info[rix].phy == IEEE80211_T_OFDM &&
        (flags & HAL_TXDESC_NOACK) == 0) {
        cix = rt->info[sc->sc_protrix].controlRate;
        sc->sc_tx_stats.ast_tx_protect++;
    }

    *(a_uint16_t *)&wh->i_seq[0] =
        adf_os_cpu_to_le16(ni->ni_txseqmgmt << IEEE80211_SEQ_SEQ_SHIFT);
    INCR(ni->ni_txseqmgmt, IEEE80211_SEQ_MAX);

    ctsduration = 0;
    if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
        adf_os_assert(cix != 0xff);
        ctsrate = rt->info[cix].rateCode;
        if (shortPreamble) {
            ctsrate |= rt->info[cix].shortPreamble;
            if (flags & HAL_TXDESC_RTSENA)        /* SIFS + CTS */
                ctsduration += rt->info[cix].spAckDuration;
            if ((flags & HAL_TXDESC_NOACK) == 0)  /* SIFS + ACK */
                ctsduration += rt->info[cix].spAckDuration;
        } else {
            if (flags & HAL_TXDESC_RTSENA)        /* SIFS + CTS */
                ctsduration += rt->info[cix].lpAckDuration;
            if ((flags & HAL_TXDESC_NOACK) == 0)  /* SIFS + ACK */
                ctsduration += rt->info[cix].lpAckDuration;
        }
        ctsduration += ath_hal_computetxtime(ah,
                                             rt, pktlen, rix, shortPreamble);
        try0 = 1;
    } else
        ctsrate = 0;

    flags |= HAL_TXDESC_INTREQ;

    ah->ah_setupTxDesc(ds
                       , pktlen
                       , hdrlen
                       , atype
                       , 60
                       , txrate, try0
                       , keyix
                       , flags
                       , ctsrate
                       , ctsduration);

    bf->bf_flags = flags;

    /*
     * Set key type in tx desc while sending the encrypted challenge to AP
     * in Auth frame 3 of Shared Authentication, owl needs this.
     */
    if (iswep && (keyix != HAL_TXKEYIX_INVALID) &&
        (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_AUTH)
        ah->ah_fillKeyTxDesc(ds, mh->keytype);

    ath_filltxdesc(sc, bf);

    for (i = 0; i < 4; i++) {
        series[i].Tries = 2;
        series[i].Rate = txrate;
        series[i].ChSel = sc->sc_ic.ic_tx_chainmask;
        series[i].RateFlags = 0;
    }

    ah->ah_set11nRateScenario(ds, 0, ctsrate, series, 4, 0);
    ath_tgt_txqaddbuf(sc, txq, bf, bf->bf_lastds);

    return;

fail:
    HTC_ReturnBuffers(sc->tgt_htc_handle, endpt, skb);
    return;
}
static void
ath_tgt_txqaddbuf(struct ath_softc_tgt *sc,
                  struct ath_txq *txq, struct ath_tx_buf *bf,
                  struct ath_tx_desc *lastds)
{
    struct ath_hal *ah = sc->sc_ah;

    ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

    if (txq->axq_link == NULL) {
        ah->ah_setTxDP(ah, txq->axq_qnum, ATH_BUF_GET_DESC_PHY_ADDR(bf));
    } else {
        *txq->axq_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);
    }

    txq->axq_link = &lastds->ds_link;
    ah->ah_startTxDma(ah, txq->axq_qnum);
}
void ath_tgt_handle_normal(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    ath_atx_tid_t *tid;
    struct ath_node_target *an;

    an = (struct ath_node_target *)bf->bf_node;
    adf_os_assert(an);

    tid = &an->tid[bf->bf_tidno];
    adf_os_assert(tid);

    bf->bf_comp = ath_tgt_tx_comp_normal;

    INCR(tid->seq_start, IEEE80211_SEQ_MAX);
    ath_tgt_tx_send_normal(sc, bf);
}

static void
ath_tgt_tx_enqueue(struct ath_txq *txq, struct ath_atx_tid *tid)
{
    if (tid->paused)
        return;

    if (tid->sched)
        return;

    tid->sched = AH_TRUE;
    asf_tailq_insert_tail(&txq->axq_tidq, tid, tid_qelem);
}
static void
ath_tgt_txq_schedule(struct ath_softc_tgt *sc, struct ath_txq *txq)
{
    struct ath_atx_tid *tid;
    u_int8_t bdone;

    bdone = AH_FALSE;

    do {
        TAILQ_DEQ(&txq->axq_tidq, tid, tid_qelem);

        if (tid == NULL)
            return;

        tid->sched = AH_FALSE;

        if (tid->paused)
            continue;

        if (!(tid->flag & TID_AGGR_ENABLED))
            ath_tgt_tx_sched_normal(sc, tid);
        else
            ath_tgt_tx_sched_aggr(sc, tid);

        bdone = AH_TRUE;

        if (!asf_tailq_empty(&tid->buf_q)) {
            ath_tgt_tx_enqueue(txq, tid);
        }

    } while (!asf_tailq_empty(&txq->axq_tidq) && !bdone);
}
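/*
 * Entry point for frames on an aggregation-enabled TID. The frame is
 * buffered on the TID queue when the hardware queue is already deep
 * enough, the TID already has backlog or is paused, or the sequence
 * number falls outside the block-ack window; otherwise it is added to
 * the BAW and sent immediately as a singleton.
 */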
void
ath_tgt_handle_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
    ath_atx_tid_t *tid;
    struct ath_node_target *an;
    struct ath_txq *txq = bf->bf_txq;
    a_bool_t queue_frame, within_baw;

    an = (struct ath_node_target *)bf->bf_node;
    adf_os_assert(an);

    tid = &an->tid[bf->bf_tidno];
    adf_os_assert(tid);

    bf->bf_comp = ath_tgt_tx_comp_aggr;

    within_baw = BAW_WITHIN(tid->seq_start, tid->baw_size,
                            SEQNO_FROM_BF_SEQNO(bf->bf_seqno));

    queue_frame = ((txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) ||
                   (!asf_tailq_empty(&tid->buf_q)) ||
                   (tid->paused) || (!within_baw));

    if (queue_frame) {
        asf_tailq_insert_tail(&tid->buf_q, bf, bf_list);
        ath_tgt_tx_enqueue(txq, tid);
    } else {
        ath_tx_addto_baw(tid, bf);
        __stats(sc, txaggr_nframes);
        ath_tgt_tx_send_normal(sc, bf);
    }
}
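/*
 * Drain buffered frames from a non-aggregate TID into its hardware
 * queue until the queue depth reaches ATH_AGGR_MIN_QDEPTH.
 */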
static void
ath_tgt_tx_sched_normal(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
{
    struct ath_tx_buf *bf;
    struct ath_txq *txq = TID_TO_ACTXQ(tid->tidno);

    do {
        if (asf_tailq_empty(&tid->buf_q))
            break;

        bf = asf_tailq_first(&tid->buf_q);
        asf_tailq_remove(&tid->buf_q, bf, bf_list);
        ath_tgt_tx_send_normal(sc, bf);

    } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH);
}
static void
ath_tgt_tx_sched_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid)
{
    struct ath_tx_buf *bf, *bf_last;
    ATH_AGGR_STATUS status;
    ath_tx_bufhead bf_q;
    struct ath_txq *txq = TID_TO_ACTXQ(tid->tidno);
    struct ath_tx_desc *ds = NULL;
    struct ath_hal *ah = sc->sc_ah;
    int i;

    if (asf_tailq_empty(&tid->buf_q))
        return;

    do {
        if (asf_tailq_empty(&tid->buf_q))
            break;

        asf_tailq_init(&bf_q);

        status = ath_tgt_tx_form_aggr(sc, tid, &bf_q);

        if (asf_tailq_empty(&bf_q))
            break;

        bf = asf_tailq_first(&bf_q);
        bf_last = asf_tailq_last(&bf_q, ath_tx_bufhead_s);

        if (bf->bf_nframes == 1) {

            if (bf->bf_retries == 0)
                __stats(sc, txaggr_single);

            bf->bf_isaggr = 0;
            bf->bf_lastds = &(bf->bf_descarr[bf->bf_dmamap_info.nsegs - 1]);
            bf->bf_lastds->ds_link = 0;
            bf->bf_next = NULL;

            for (ds = bf->bf_desc; ds <= bf->bf_lastds; ds++)
                ah->ah_clr11nAggr(ds);

            ath_buf_set_rate(sc, bf);
            bf->bf_txq_add(sc, bf);
            continue;
        }

        bf_last->bf_next = NULL;
        bf_last->bf_lastds->ds_link = 0;
        bf_last->bf_ndelim = 0;

        bf->bf_isaggr = 1;
        ath_buf_set_rate(sc, bf);
        ah->ah_set11nAggrFirst(bf->bf_desc, bf->bf_al,
                               bf->bf_ndelim);
        bf->bf_lastds = bf_last->bf_lastds;

        for (i = 0; i < bf_last->bf_dmamap_info.nsegs; i++)
            ah->ah_set11nAggrLast(&bf_last->bf_descarr[i]);

        if (status == ATH_AGGR_8K_LIMITED) {
            adf_os_assert(0);
            break;
        }

        bf->bf_txq_add(sc, bf);

    } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
             status != ATH_TGT_AGGR_BAW_CLOSED);
}
static u_int32_t ath_lookup_rate(struct ath_softc_tgt *sc,
                                 struct ath_node_target *an,
                                 struct ath_tx_buf *bf)
{
    int i, prate;
    u_int32_t max4msframelen, frame_length;
    u_int16_t aggr_limit, legacy = 0;
    const HAL_RATE_TABLE *rt = sc->sc_currates;
    struct ieee80211_node_target *ieee_node = (struct ieee80211_node_target *)an;

    if (bf->bf_ismcast) {
        bf->bf_rcs[1].tries = bf->bf_rcs[2].tries = bf->bf_rcs[3].tries = 0;
        bf->bf_rcs[0].rix = 0xb;
        bf->bf_rcs[0].tries = ATH_TXMAXTRY - 1;
        bf->bf_rcs[0].flags = 0;
    } else {
        ath_tgt_rate_findrate(sc, an, AH_TRUE, 0, ATH_TXMAXTRY - 1, 4, 1,
                              ATH_RC_PROBE_ALLOWED, bf->bf_rcs, &prate);
    }

    max4msframelen = IEEE80211_AMPDU_LIMIT_MAX;

    for (i = 0; i < 4; i++) {
        if (bf->bf_rcs[i].tries) {
            frame_length = bf->bf_rcs[i].max4msframelen;

            if (rt->info[bf->bf_rcs[i].rix].phy != IEEE80211_T_HT) {
                legacy = 1;
                break;
            }

            max4msframelen = ATH_MIN(max4msframelen, frame_length);
        }
    }

    if (prate || legacy)
        return 0;

    if (sc->sc_ic.ic_enable_coex)
        aggr_limit = ATH_MIN((max4msframelen * 3) / 8, sc->sc_ic.ic_ampdu_limit);
    else
        aggr_limit = ATH_MIN(max4msframelen, sc->sc_ic.ic_ampdu_limit);

    if (ieee_node->ni_maxampdu)
        aggr_limit = ATH_MIN(aggr_limit, ieee_node->ni_maxampdu);

    return aggr_limit;
}
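/*
 * Pull frames off the TID queue into bf_q to form one aggregate.
 * Formation stops when the block-ack window closes, when the rate-
 * dependent aggregate byte limit or the subframe count limit would be
 * exceeded, or when the TID queue empties; the first buffer records
 * the aggregate length and frame count for the caller.
 */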
int ath_tgt_tx_form_aggr(struct ath_softc_tgt *sc, ath_atx_tid_t *tid,
                         ath_tx_bufhead *bf_q)
{
    struct ath_tx_buf *bf_first, *bf_prev = NULL;
    int nframes = 0, rl = 0;
    struct ath_tx_desc *ds = NULL;
    struct ath_tx_buf *bf;
    struct ath_hal *ah = sc->sc_ah;
    u_int16_t aggr_limit = (64 * 1024 - 1), al = 0, bpad = 0, al_delta;
    u_int16_t h_baw = tid->baw_size / 2, prev_al = 0, prev_frames = 0;

    bf_first = asf_tailq_first(&tid->buf_q);

    do {
        bf = asf_tailq_first(&tid->buf_q);
        adf_os_assert(bf);

        if (!BAW_WITHIN(tid->seq_start, tid->baw_size,
                        SEQNO_FROM_BF_SEQNO(bf->bf_seqno))) {

            bf_first->bf_al = al;
            bf_first->bf_nframes = nframes;
            return ATH_TGT_AGGR_BAW_CLOSED;
        }

        if (!rl) {
            aggr_limit = ath_lookup_rate(sc, tid->an, bf);
            rl = 1;
        }

        al_delta = ATH_AGGR_DELIM_SZ + bf->bf_pktlen;

        if (nframes && (aggr_limit < (al + bpad + al_delta + prev_al))) {
            bf_first->bf_al = al;
            bf_first->bf_nframes = nframes;
            return ATH_TGT_AGGR_LIMITED;
        }

#ifdef PROJECT_K2
        if ((nframes + prev_frames) >= ATH_MIN((h_baw), 17)) {
#else
        if ((nframes + prev_frames) >= ATH_MIN((h_baw), 22)) {
#endif
            bf_first->bf_al = al;
            bf_first->bf_nframes = nframes;
            return ATH_TGT_AGGR_LIMITED;
        }

        ath_tx_addto_baw(tid, bf);
        asf_tailq_remove(&tid->buf_q, bf, bf_list);
        asf_tailq_insert_tail(bf_q, bf, bf_list);
        nframes++;

        adf_os_assert(bf);
        adf_os_assert(bf->bf_comp == ath_tgt_tx_comp_aggr);

        al += bpad + al_delta;
        bf->bf_ndelim = ATH_AGGR_GET_NDELIM(bf->bf_pktlen);

        switch (bf->bf_keytype) {
        case HAL_KEY_TYPE_AES:
            bf->bf_ndelim += ATH_AGGR_ENCRYPTDELIM;
            break;
        case HAL_KEY_TYPE_WEP:
        case HAL_KEY_TYPE_TKIP:
            bf->bf_ndelim += 64;
            break;
        case HAL_KEY_TYPE_WAPI:
            bf->bf_ndelim += 12;
            break;
        default:
            break;
        }

        bpad = PADBYTES(al_delta) + (bf->bf_ndelim << 2);

        if (bf_prev) {
            bf_prev->bf_next = bf;
            bf_prev->bf_lastds->ds_link = ATH_BUF_GET_DESC_PHY_ADDR(bf);
        }
        bf_prev = bf;

        for (ds = bf->bf_desc; ds <= bf->bf_lastds; ds++)
            ah->ah_set11nAggrMiddle(ds, bf->bf_ndelim);

    } while (!asf_tailq_empty(&tid->buf_q));

    bf_first->bf_al = al;
    bf_first->bf_nframes = nframes;

    return ATH_TGT_AGGR_DONE;
}
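/*
 * Record a (non-retried) frame in the TID's block-ack window bitmap,
 * advancing baw_tail when the frame lands beyond the current tail.
 */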
void ath_tx_addto_baw(ath_atx_tid_t *tid, struct ath_tx_buf *bf)
{
    int index, cindex;

    if (bf->bf_isretried) {
        return;
    }

    index = ATH_BA_INDEX(tid->seq_start, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
    cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

    TX_BUF_BITMAP_SET(tid->tx_buf_bitmap, cindex);

    if (index >= ((tid->baw_tail - tid->baw_head) & (ATH_TID_MAX_BUFS - 1))) {
        tid->baw_tail = cindex;
        INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
    }
}
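/*
 * Completion handler for an aggregate: walk the subframes against the
 * block-ack bitmap, releasing acked subframes and collecting the rest
 * for retry via ath_tx_retry_subframe(), which may also flag a BAR
 * frame (presumably to resynchronize the window). Retried subframes
 * are prepended back onto the TID queue for rescheduling.
 */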
void ath_tgt_tx_comp_aggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
	struct ath_tx_desc lastds;
	struct ath_tx_desc *ds = &lastds;
	struct ath_rc_series rcs[4];
	u_int16_t seq_st;
	u_int32_t *ba;
	int ba_index;
	int nbad = 0;
	int nframes = bf->bf_nframes;
	struct ath_tx_buf *bf_next;
	ath_tx_bufhead bf_q;
	int tx_ok = 1;
	struct ath_tx_buf *bar = NULL;
	struct ath_txq *txq;

	txq = bf->bf_txq;

	if (tid->flag & TID_CLEANUP_INPROGRES) {
		ath_tx_comp_cleanup(sc, bf);
		return;
	}

	adf_os_mem_copy(ds, bf->bf_lastds, sizeof(struct ath_tx_desc));
	adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));

	if (ds->ds_txstat.ts_flags == HAL_TX_SW_FILTERED) {
		adf_os_assert(0);
		return;
	}

	if (!bf->bf_isaggr) {
		ath_tx_comp_unaggr(sc, bf);
		return;
	}

	__stats(sc, tx_compaggr);

	asf_tailq_init(&bf_q);

	seq_st = ATH_DS_BA_SEQ(ds);
	ba     = ATH_DS_BA_BITMAP(ds);
	tx_ok  = (ATH_DS_TX_STATUS(ds) == HAL_OK);

	if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
		ath_tx_comp_aggr_error(sc, bf, tid);
		return;
	}

	if (tx_ok && !ATH_DS_TX_BA(ds)) {
		__stats(sc, txaggr_babug);
		adf_os_print("BA Bug?\n");
		ath_tx_comp_aggr_error(sc, bf, tid);
		return;
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
		bf_next  = bf->bf_next;

		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
			__stats(sc, txaggr_compgood);
			ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
			ath_tx_status_update_aggr(sc, bf, ds, rcs, 1);
			ath_tx_freebuf(sc, bf);
		} else {
			ath_tx_retry_subframe(sc, bf, &bf_q, &bar);
			nbad++;
		}
		bf = bf_next;
	}

	ath_update_aggr_stats(sc, ds, nframes, nbad);
	ath_rate_tx_complete(sc, an, ds, rcs, nframes, nbad);

	if (bar) {
		ath_bar_tx(sc, tid, bar);
	}

	if (!asf_tailq_empty(&bf_q)) {
		__stats(sc, txaggr_prepends);
		TAILQ_INSERTQ_HEAD(&tid->buf_q, &bf_q, bf_list);
		ath_tgt_tx_enqueue(txq, tid);
	}
}
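
/*
 * Error path for aggregate completion (e.g. excessive retries): every
 * subframe is treated as unacked and pushed through the retry path,
 * then any retryable frames are prepended back onto the TID queue.
 */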
static void
ath_tx_comp_aggr_error(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
		       ath_atx_tid_t *tid)
{
	struct ath_tx_desc lastds;
	struct ath_tx_desc *ds = &lastds;
	struct ath_rc_series rcs[4];
	struct ath_tx_buf *bar = NULL;
	struct ath_tx_buf *bf_next;
	int nframes = bf->bf_nframes;
	ath_tx_bufhead bf_q;
	struct ath_txq *txq;

	asf_tailq_init(&bf_q);
	txq = bf->bf_txq;

	adf_os_mem_copy(ds, bf->bf_lastds, sizeof(struct ath_tx_desc));
	adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));

	while (bf) {
		bf_next = bf->bf_next;
		ath_tx_retry_subframe(sc, bf, &bf_q, &bar);
		bf = bf_next;
	}

	ath_update_aggr_stats(sc, ds, nframes, nframes);
	ath_rate_tx_complete(sc, tid->an, ds, rcs, nframes, nframes);

	if (bar) {
		ath_bar_tx(sc, tid, bar);
	}

	if (!asf_tailq_empty(&bf_q)) {
		__stats(sc, txaggr_prepends);
		TAILQ_INSERTQ_HEAD(&tid->buf_q, &bf_q, bf_list);
		ath_tgt_tx_enqueue(txq, tid);
	}
}
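
/*
 * Completion handler used while a TID teardown is in progress: frames
 * are freed without retry, and once the last outstanding frame
 * (tracked in tid->incomp) completes, the cleanup flag is cleared and
 * aggregation on the TID is resumed.
 */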
static void
ath_tx_comp_cleanup(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
	struct ath_tx_desc lastds;
	struct ath_tx_desc *ds = &lastds;
	struct ath_rc_series rcs[4];
	u_int16_t seq_st;
	u_int32_t *ba;
	int ba_index;
	int nbad = 0;
	int nframes = bf->bf_nframes;
	struct ath_tx_buf *bf_next;
	int tx_ok = 1;

	adf_os_mem_copy(ds, bf->bf_lastds, sizeof(struct ath_tx_desc));
	adf_os_mem_copy(rcs, bf->bf_rcs, sizeof(rcs));

	seq_st = ATH_DS_BA_SEQ(ds);
	ba     = ATH_DS_BA_BITMAP(ds);
	tx_ok  = (ATH_DS_TX_STATUS(ds) == HAL_OK);

	if (!bf->bf_isaggr) {
		ath_update_stats(sc, bf);
		__stats(sc, tx_compunaggr);
		ath_tx_status_update(sc, bf);
		ath_tx_freebuf(sc, bf);

		if (tid->flag & TID_CLEANUP_INPROGRES) {
			owl_tgt_tid_cleanup(sc, tid);
		}
		return;
	}

	while (bf) {
		ba_index = ATH_BA_INDEX(seq_st, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
		bf_next  = bf->bf_next;

		ath_tx_status_update_aggr(sc, bf, ds, rcs, 0);
		ath_tx_freebuf(sc, bf);

		tid->incomp--;
		if (!tid->incomp) {
			tid->flag &= ~TID_CLEANUP_INPROGRES;
			ath_aggr_resume_tid(sc, tid);
			break;
		}
		bf = bf_next;
	}

	ath_update_aggr_stats(sc, ds, nframes, nbad);
	ath_rate_tx_complete(sc, an, ds, rcs, nframes, nbad);
}
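
/*
 * Requeue a single subframe of a failed aggregate.  Its descriptors
 * are first stripped of their aggregate markings; if the frame has
 * reached OWLMAX_RETRIES it is removed from the BAW and the first such
 * frame is recycled as the BAR buffer instead of being retried.
 */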
static void
ath_tx_retry_subframe(struct ath_softc_tgt *sc, struct ath_tx_buf *bf,
		      ath_tx_bufhead *bf_q, struct ath_tx_buf **bar)
{
	struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
	struct ath_tx_desc *ds = NULL;
	struct ath_hal *ah = sc->sc_ah;
	int i = 0;

	__stats(sc, txaggr_compretries);

	for (ds = bf->bf_desc, i = 0; i < bf->bf_dmamap_info.nsegs; ds++, i++) {
		ah->ah_clr11nAggr(ds);
		ah->ah_set11nBurstDuration(ds, 0);
		ah->ah_set11nVirtualMoreFrag(ds, 0);
	}

	if (bf->bf_retries >= OWLMAX_RETRIES) {
		__stats(sc, txaggr_xretries);
		ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
		ath_tx_status_update_aggr(sc, bf, bf->bf_lastds, NULL, 0);

		if (!*bar)
			*bar = bf;
		else
			ath_tx_freebuf(sc, bf);
		return;
	}

	if (!bf->bf_next) {
		__stats(sc, txaggr_errlast);
		bf = ath_buf_toggle(sc, bf, 1);
	} else
		bf->bf_lastds = &(bf->bf_descarr[bf->bf_dmamap_info.nsegs - 1]);

	ath_tx_set_retry(sc, bf);
	asf_tailq_insert_tail(bf_q, bf, bf_list);
}
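
/*
 * Bump the per-aggregate debug counters from the completion status and
 * flags of the aggregate's last descriptor.
 */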
static void
ath_update_aggr_stats(struct ath_softc_tgt *sc,
		      struct ath_tx_desc *ds, int nframes,
		      int nbad)
{
	u_int8_t status = ATH_DS_TX_STATUS(ds);
	u_int8_t txflags = ATH_DS_TX_FLAGS(ds);

	__statsn(sc, txaggr_longretries, ds->ds_txstat.ts_longretry);
	__statsn(sc, txaggr_shortretries, ds->ds_txstat.ts_shortretry);

	if (txflags & HAL_TX_DESC_CFG_ERR)
		__stats(sc, txaggr_desc_cfgerr);
	if (txflags & HAL_TX_DATA_UNDERRUN)
		__stats(sc, txaggr_data_urun);
	if (txflags & HAL_TX_DELIM_UNDERRUN)
		__stats(sc, txaggr_delim_urun);

	if (!status) {
		return;
	}

	if (status & HAL_TXERR_XRETRY)
		__stats(sc, txaggr_compxretry);
	if (status & HAL_TXERR_FILT)
		__stats(sc, txaggr_filtered);
	if (status & HAL_TXERR_FIFO)
		__stats(sc, txaggr_fifo);
	if (status & HAL_TXERR_XTXOP)
		__stats(sc, txaggr_xtxop);
	if (status & HAL_TXERR_TIMER_EXPIRED)
		__stats(sc, txaggr_timer_exp);
}
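
/*
 * Completion handler for a non-aggregated frame: report to rate
 * control, then either retry on XRETRY or advance the BAW and free
 * the buffer.
 */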
static void
ath_tx_comp_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
	struct ath_tx_desc *ds = bf->bf_lastds;

	ath_update_stats(sc, bf);
	ath_rate_tx_complete(sc, an, ds, bf->bf_rcs, 1, 0);

	if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
		ath_tx_retry_unaggr(sc, bf);
		return;
	}

	__stats(sc, tx_compunaggr);
	ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
	ath_tx_status_update(sc, bf);
	ath_tx_freebuf(sc, bf);
}
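
/*
 * Retry a non-aggregated frame, or give up once OWLMAX_RETRIES is
 * reached and send a BAR so the receiver's window can be moved past
 * the lost sequence number.
 */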
static inline void
ath_tx_retry_unaggr(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);
	struct ath_txq *txq;

	txq = bf->bf_txq;

	if (bf->bf_retries >= OWLMAX_RETRIES) {
		__stats(sc, txunaggr_xretry);
		ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));
		ath_tx_status_update(sc, bf);
		ath_bar_tx(sc, tid, bf);
		return;
	}

	__stats(sc, txunaggr_compretries);

	if (!bf->bf_lastds->ds_link) {
		__stats(sc, txunaggr_errlast);
		bf = ath_buf_toggle(sc, bf, 1);
	}

	ath_tx_set_retry(sc, bf);
	asf_tailq_insert_head(&tid->buf_q, bf, bf_list);
	ath_tgt_tx_enqueue(txq, tid);
}
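
/*
 * Release a sequence number from the BAW bitmap and slide the window
 * head forward over any contiguous completed slots.
 */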
static void
ath_tx_update_baw(ath_atx_tid_t *tid, int seqno)
{
	int index;
	int cindex;

	index  = ATH_BA_INDEX(tid->seq_start, seqno);
	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

	TX_BUF_BITMAP_CLR(tid->tx_buf_bitmap, cindex);

	while (tid->baw_head != tid->baw_tail &&
	       (!TX_BUF_BITMAP_IS_SET(tid->tx_buf_bitmap, tid->baw_head))) {
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
	}
}
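
/* Mark a frame for retransmission and set the 802.11 Retry bit. */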
static void ath_tx_set_retry(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ieee80211_frame *wh;

	__stats(sc, txaggr_retries);

	bf->bf_isretried = 1;
	bf->bf_retries++;

	wh = ATH_SKB_2_WH(bf->bf_skb);
	wh->i_fc[1] |= IEEE80211_FC1_RETRY;
}
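
/*
 * Tear down aggregation state for a TID: drop retried (or, with
 * discard_all, every) frame from the software queue, then walk the
 * BAW counting frames still with the hardware; if any remain,
 * completion is deferred to ath_tx_comp_cleanup() by setting
 * TID_CLEANUP_INPROGRES.
 */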
void ath_tgt_tx_cleanup(struct ath_softc_tgt *sc, struct ath_node_target *an,
			ath_atx_tid_t *tid, a_uint8_t discard_all)
{
	struct ath_tx_buf *bf;
	struct ath_tx_buf *bf_next;
	struct ath_txq *txq;

	txq = TID_TO_ACTXQ(tid->tidno);

	bf = asf_tailq_first(&tid->buf_q);

	while (bf) {
		if (discard_all || bf->bf_isretried) {
			bf_next = asf_tailq_next(bf, bf_list);
			TAILQ_DEQ(&tid->buf_q, bf, bf_list);

			if (bf->bf_isretried)
				ath_tx_update_baw(tid, SEQNO_FROM_BF_SEQNO(bf->bf_seqno));

			ath_tx_freebuf(sc, bf);
			bf = bf_next;
			continue;
		}
		bf->bf_comp = ath_tgt_tx_comp_normal;
		bf = asf_tailq_next(bf, bf_list);
	}

	ath_aggr_pause_tid(sc, tid);

	while (tid->baw_head != tid->baw_tail) {
		if (TX_BUF_BITMAP_IS_SET(tid->tx_buf_bitmap, tid->baw_head)) {
			tid->incomp++;
			tid->flag |= TID_CLEANUP_INPROGRES;
			TX_BUF_BITMAP_CLR(tid->tx_buf_bitmap, tid->baw_head);
		}

		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
	}

	if (!(tid->flag & TID_CLEANUP_INPROGRES)) {
		ath_aggr_resume_tid(sc, tid);
	}
}
/******************/
/* BAR Management */
/******************/
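
/*
 * Disable aggregation on the TID and notify the host via a WMI event
 * so it can send a DELBA.  Note that the initiator and reasoncode
 * arguments are currently ignored in favour of fixed values.
 */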
static void ath_tgt_delba_send(struct ath_softc_tgt *sc,
			       struct ieee80211_node_target *ni,
			       a_uint8_t tidno, a_uint8_t initiator,
			       a_uint16_t reasoncode)
{
	struct ath_node_target *an = ATH_NODE_TARGET(ni);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, tidno);
	struct wmi_data_delba wmi_delba;

	tid->flag &= ~TID_AGGR_ENABLED;

	ath_tgt_tx_cleanup(sc, an, tid, 1);

	wmi_delba.ni_nodeindex = ni->ni_nodeindex;
	wmi_delba.tidno = tid->tidno;
	wmi_delba.initiator = 1;
	wmi_delba.reasoncode = IEEE80211_REASON_UNSPECIFIED;

	__stats(sc, txbar_xretry);
	wmi_event(sc->tgt_wmi_handle,
		  WMI_DELBA_EVENTID,
		  &wmi_delba,
		  sizeof(wmi_delba));
}
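
/*
 * Retry a failed BAR; after OWLMAX_BAR_RETRIES the BA session is torn
 * down with a DELBA and the TID drained instead.
 */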
static void ath_bar_retry(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ath_node_target *an = ATH_NODE_TARGET(bf->bf_node);
	ath_atx_tid_t *tid = ATH_AN_2_TID(an, bf->bf_tidno);

	if (bf->bf_retries >= OWLMAX_BAR_RETRIES) {
		ath_tgt_delba_send(sc, bf->bf_node, tid->tidno, 1,
				   IEEE80211_REASON_UNSPECIFIED);
		ath_tgt_tid_drain(sc, tid);

		bf->bf_comp = NULL;
		ath_buf_comp(sc, bf);
		return;
	}

	__stats(sc, txbar_compretries);

	if (!bf->bf_lastds->ds_link) {
		__stats(sc, txbar_errlast);
		bf = ath_buf_toggle(sc, bf, 1);
	}

	bf->bf_lastds->ds_link = 0;

	ath_tx_set_retry(sc, bf);
	ath_tgt_txq_add_ucast(sc, bf);
}
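
/*
 * BAR completion: on XRETRY the BAR itself is retried, otherwise the
 * TID (paused while the BAR was in flight) is resumed.
 */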
static void ath_bar_tx_comp(struct ath_softc_tgt *sc, struct ath_tx_buf *bf)
{
	struct ath_tx_desc *ds = bf->bf_lastds;
	struct ath_node_target *an;
	ath_atx_tid_t *tid;
	struct ath_txq *txq;

	an = (struct ath_node_target *)bf->bf_node;
	tid = &an->tid[bf->bf_tidno];
	txq = TID_TO_ACTXQ(tid->tidno);

	if (ATH_DS_TX_STATUS(ds) & HAL_TXERR_XRETRY) {
		ath_bar_retry(sc, bf);
		return;
	}

	ath_aggr_resume_tid(sc, tid);

	bf->bf_comp = NULL;
	ath_buf_comp(sc, bf);
}
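
/*
 * Build and queue a BAR frame for the TID, reusing the supplied
 * buffer's skb.  The TID is paused first and resumed from
 * ath_bar_tx_comp() on successful completion.  The BAR is sent at a
 * fixed low rate (min_rate = 0x0b) on all four rate series.
 */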
static void ath_bar_tx(struct ath_softc_tgt *sc,
		       ath_atx_tid_t *tid, struct ath_tx_buf *bf)
{
	adf_nbuf_t skb;
	struct ieee80211_frame_bar *bar;
	u_int8_t min_rate;
	struct ath_tx_desc *ds, *ds0;
	struct ath_hal *ah = sc->sc_ah;
	HAL_11N_RATE_SERIES series[4];
	int i = 0;
	adf_nbuf_queue_t skbhead;
	a_uint8_t *anbdata;
	a_uint32_t anblen;

	__stats(sc, tx_bars);

	adf_os_mem_set(&series, 0, sizeof(series));

	ath_aggr_pause_tid(sc, tid);

	skb = adf_nbuf_queue_remove(&bf->bf_skbhead);
	adf_nbuf_peek_header(skb, &anbdata, &anblen);
	adf_nbuf_trim_tail(skb, anblen);
	bar = (struct ieee80211_frame_bar *) anbdata;

	min_rate = 0x0b;

	ath_dma_unmap(sc, bf);
	adf_nbuf_queue_add(&bf->bf_skbhead, skb);

	bar->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	bar->i_fc[0] = IEEE80211_FC0_VERSION_0 |
		IEEE80211_FC0_TYPE_CTL |
		IEEE80211_FC0_SUBTYPE_BAR;
	bar->i_ctl = tid->tidno << IEEE80211_BAR_CTL_TID_S |
		IEEE80211_BAR_CTL_COMBA;
	bar->i_seq = adf_os_cpu_to_le16(tid->seq_start << IEEE80211_SEQ_SEQ_SHIFT);

	bf->bf_seqno = tid->seq_start << IEEE80211_SEQ_SEQ_SHIFT;

	adf_nbuf_put_tail(skb, sizeof(struct ieee80211_frame_bar));

	bf->bf_comp = ath_bar_tx_comp;
	bf->bf_tidno = tid->tidno;
	bf->bf_node = &tid->an->ni;
	ath_dma_map(sc, bf);
	adf_nbuf_dmamap_info(bf->bf_dmamap, &bf->bf_dmamap_info);

	ds = bf->bf_desc;
	ah->ah_setupTxDesc(ds
			   , adf_nbuf_len(skb) + IEEE80211_CRC_LEN
			   , 0
			   , HAL_PKT_TYPE_NORMAL
			   , ATH_MIN(60, 60)
			   , min_rate
			   , ATH_TXMAXTRY
			   , bf->bf_keyix
			   , HAL_TXDESC_INTREQ
			   | HAL_TXDESC_CLRDMASK
			   , 0, 0);

	skbhead = bf->bf_skbhead;
	bf->bf_isaggr = 0;
	bf->bf_next = NULL;

	for (ds0 = ds, i = 0; i < bf->bf_dmamap_info.nsegs; ds0++, i++) {
		ah->ah_clr11nAggr(ds0);
	}

	ath_filltxdesc(sc, bf);

	for (i = 0; i < 4; i++) {
		series[i].Tries = ATH_TXMAXTRY;
		series[i].Rate = min_rate;
		series[i].ChSel = sc->sc_ic.ic_tx_chainmask;
	}

	ah->ah_set11nRateScenario(bf->bf_desc, 0, 0, series, 4, 4);
	ath_tgt_txq_add_ucast(sc, bf);
}
  1721. }