/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include "iwl-shared.h"
#include "iwl-debug.h"
/**
* DOC: Transport layer - what is it?
*
* The transport layer is the layer that deals with the HW directly. It provides
* an abstraction of the underlying HW to the upper layer. The transport layer
* doesn't provide any policy, algorithm or anything of this kind, but only
* mechanisms to make the HW do something. It is not completely stateless, but
* close to it.
* We will have an implementation for each different supported bus.
*/
/**
* DOC: Life cycle of the transport layer
*
* The transport layer has a very precise life cycle.
*
* 1) A helper function is called during the module initialization and
* registers the bus driver's ops with the transport's alloc function.
* 2) The bus's probe function calls the transport layer's allocation
* function. Of course, this function is bus specific.
* 3) This allocation function will spawn the upper layer which will
* register to mac80211.
*
* 4) At some point (e.g. mac80211's start call), the op_mode will call
* the following sequence:
* start_hw
* start_fw
*
* 5) Then, when finished (or on reset):
* stop_fw (a.k.a. stop device for the moment)
* stop_hw
*
* 6) Eventually, the free function will be called.
*/
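/*
 * Illustrative sketch only (not a real op_mode): how the sequence above maps
 * to the iwl_trans_* helpers declared later in this header. The function
 * name, firmware pointer and error handling are hypothetical placeholders.
 *
 *	static int example_op_mode_start(struct iwl_trans *trans,
 *					 const struct fw_img *fw)
 *	{
 *		int ret;
 *
 *		ret = iwl_trans_start_hw(trans);	// HW may now interrupt
 *		if (ret)
 *			return ret;
 *
 *		ret = iwl_trans_start_fw(trans, fw);	// load and kick the fw
 *		if (ret)
 *			iwl_trans_stop_hw(trans);
 *		return ret;
 *	}
 *
 * Once the fw sends its alive notification, the op_mode is expected to call
 * iwl_trans_fw_alive(). Tear-down mirrors the sequence: iwl_trans_stop_device(),
 * then iwl_trans_stop_hw(), and eventually iwl_trans_free().
 */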
struct iwl_priv;
struct iwl_shared;
struct iwl_op_mode;
struct fw_img;
struct sk_buff;
struct dentry;
/**
* DOC: Host command section
*
* A host command is a command issued by the upper layer to the fw. There are
* several versions of fw that have several APIs. The transport layer is
* completely agnostic to these differences.
* The transport does provide helper functionality (i.e. SYNC / ASYNC mode).
* A usage sketch is given below, after the struct iwl_host_cmd definition.
*/
#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
/**
* enum CMD_MODE - how to send the host commands?
*
* @CMD_SYNC: The caller will be stalled until the fw responds to the command
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response.
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_ON_DEMAND = BIT(2),
};
#define DEF_CMD_PAYLOAD_SIZE 320
/**
* struct iwl_device_cmd
*
* For allocation of the command and tx queues, this establishes the overall
* size of the largest command we send to uCode, except for commands that
* aren't fully copied and use other TFD space.
*/
struct iwl_device_cmd {
struct iwl_cmd_header hdr; /* uCode API */
u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed;
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
#define IWL_MAX_CMD_TFDS 2
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
* rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
* buffer. This saves a memcpy and is worthwhile for very big commands.
*/
enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
};
/**
* struct iwl_host_cmd - Host command to the uCode
*
* @data: array of chunks that compose the data of the host command
* @resp_pkt: response packet, if %CMD_WANT_SKB was set
* @_rx_page_order: (internally used to free response packet)
* @_rx_page_addr: (internally used to free response packet)
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
* @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
* @id: id of the host command
*/
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TFDS];
struct iwl_rx_packet *resp_pkt;
unsigned long _rx_page_addr;
u32 _rx_page_order;
int handler_status;
u32 flags;
u16 len[IWL_MAX_CMD_TFDS];
u8 dataflags[IWL_MAX_CMD_TFDS];
u8 id;
};
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
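/*
 * Illustrative sketch only: sending a synchronous host command and reading
 * its response. REPLY_EXAMPLE, struct example_cmd and the second (NOCOPY)
 * chunk are hypothetical placeholders, not real uCode API definitions.
 *
 *	static int example_send_cmd(struct iwl_trans *trans,
 *				    struct example_cmd *payload,
 *				    void *big_buf, u16 big_len)
 *	{
 *		struct iwl_host_cmd cmd = {
 *			.id = REPLY_EXAMPLE,
 *			.flags = CMD_SYNC | CMD_WANT_SKB,
 *			.data = { payload, big_buf, },
 *			.len = { sizeof(*payload), big_len, },
 *			// map the big chunk instead of copying it
 *			.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
 *		};
 *		struct iwl_rx_packet *pkt;
 *		int ret;
 *
 *		ret = iwl_trans_send_cmd(trans, &cmd);
 *		if (ret)
 *			return ret;
 *
 *		pkt = cmd.resp_pkt;
 *		// ... parse the response packet ...
 *
 *		// with CMD_WANT_SKB, the caller must free the response
 *		iwl_free_resp(&cmd);
 *		return 0;
 *	}
 */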
struct iwl_rx_cmd_buffer {
struct page *_page;
};
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
return page_address(r->_page);
}
static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
struct page *p = r->_page;
r->_page = NULL;
return p;
}
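/*
 * Illustrative sketch only: an op_mode RX handler using the helpers above.
 * The handler shape and example_queue_work() are hypothetical placeholders.
 *
 *	static int example_rx_handler(struct iwl_trans *trans,
 *				      struct iwl_rx_cmd_buffer *rxb)
 *	{
 *		struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *		// inspect pkt->hdr.cmd and parse the notification here
 *
 *		// to keep the data after the handler returns, take ownership
 *		// of the page so the transport can't recycle it:
 *		// example_queue_work(rxb_steal_page(rxb));
 *		return 0;
 *	}
 */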
/**
* struct iwl_trans_ops - transport specific operations
*
* All the handlers MUST be implemented
*
* @start_hw: starts the HW - from that point on, the HW can send interrupts
* May sleep
* @stop_hw: stops the HW - from that point on, the HW will be in low power but
* will still issue an interrupt if the HW RF kill switch is triggered.
* May sleep
* @start_fw: allocates and inits all the resources for the transport
* layer. Also kicks the fw image.
* May sleep
* @fw_alive: called when the fw sends the alive notification
* May sleep
* @wake_any_queue: wake all the queues of a specific context IWL_RXON_CTX_*
* @stop_device: stops the whole device (embedded CPU put to reset)
* May sleep
* @wowlan_suspend: put the device into the correct mode for WoWLAN during
* suspend. This is optional, if not implemented WoWLAN will not be
* supported. This callback may sleep.
* @send_cmd: send a host command
* May sleep only if CMD_SYNC is set
* @tx: send an skb
* Must be atomic
* @reclaim: free packets until ssn. Returns a list of freed packets.
* Must be atomic
* @tx_agg_alloc: allocate resources for a TX BA session
* Must be atomic
* @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* May sleep
* @tx_agg_disable: de-configure a Tx queue to send AMPDUs
* May sleep
* @free: release all the resources of the transport layer itself, such as
* the irq, tasklet etc... From this point on, the device may not issue
* any interrupt (incl. RFKILL).
* May sleep
* @stop_queue: stop a specific queue
* @check_stuck_queue: check if a specific queue is stuck
* @wait_tx_queue_empty: wait until all tx queues are empty
* May sleep
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
* @suspend: stop the device unless WoWLAN is configured
* @resume: resume activity of the device
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
*/
struct iwl_trans_ops {
int (*start_hw)(struct iwl_trans *iwl_trans);
void (*stop_hw)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
void (*fw_alive)(struct iwl_trans *trans);
void (*stop_device)(struct iwl_trans *trans);
void (*wowlan_suspend)(struct iwl_trans *trans);
void (*wake_any_queue)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
const char *msg);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid);
int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
int sta_id, int tid);
int (*tx_agg_alloc)(struct iwl_trans *trans,
int sta_id, int tid);
void (*tx_agg_setup)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id, int tid,
int frame_limit, u16 ssn);
void (*free)(struct iwl_trans *trans);
void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry *dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
#ifdef CONFIG_PM_SLEEP
int (*suspend)(struct iwl_trans *trans);
int (*resume)(struct iwl_trans *trans);
#endif
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
};
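/*
 * Illustrative sketch only: the shape of a bus specific implementation of the
 * ops above. The example_* symbols are hypothetical; the real PCIe ops live
 * behind trans_ops_pcie, declared at the bottom of this file.
 *
 *	static int example_trans_start_hw(struct iwl_trans *trans)
 *	{
 *		// request the irq, prepare the card, enable interrupts...
 *		return 0;
 *	}
 *
 *	const struct iwl_trans_ops trans_ops_example = {
 *		.start_hw = example_trans_start_hw,
 *		// ... all other handlers must be implemented too, with
 *		// wowlan_suspend being the documented optional exception
 *	};
 */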
/**
* enum iwl_trans_state - state of the transport layer
*
* @IWL_TRANS_NO_FW: no fw has sent an alive response
* @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
*/
enum iwl_trans_state {
IWL_TRANS_NO_FW = 0,
IWL_TRANS_FW_ALIVE = 1,
};
/**
* struct iwl_trans - transport common data
*
* @ops: pointer to iwl_trans_ops
* @op_mode: pointer to the op_mode
* @shrd: pointer to iwl_shared which holds shared data from the upper layer
* @state: current state of the transport layer (see enum iwl_trans_state)
* @reg_lock: protect hw register access
* @dev: pointer to struct device * that represents the device
* @irq: the irq number for the device
* @hw_rev: the revision data of the HW
* @hw_id: a u32 with the ID of the device / subdevice.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @ucode_write_complete: indicates that the ucode has been copied.
* @nvm_device_type: indicates OTP or eeprom
* @pm_support: set to true in start_hw if link pm is supported
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
struct iwl_shared *shrd;
enum iwl_trans_state state;
spinlock_t reg_lock;
struct device *dev;
unsigned int irq;
u32 hw_rev;
u32 hw_id;
char hw_id_str[52];
u8 ucode_write_complete;
int nvm_device_type;
bool pm_support;
/* pointer to trans specific struct */
/* Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
};
static inline void iwl_trans_configure(struct iwl_trans *trans,
struct iwl_op_mode *op_mode)
{
/*
* only set the op_mode for the moment. Later on, this function will do
* more
*/
trans->op_mode = op_mode;
}
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
might_sleep();
return trans->ops->start_hw(trans);
}
static inline void iwl_trans_stop_hw(struct iwl_trans *trans)
{
might_sleep();
trans->ops->stop_hw(trans);
trans->state = IWL_TRANS_NO_FW;
}
static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
{
might_sleep();
trans->ops->fw_alive(trans);
trans->state = IWL_TRANS_FW_ALIVE;
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
const struct fw_img *fw)
{
might_sleep();
return trans->ops->start_fw(trans, fw);
}
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
might_sleep();
trans->ops->stop_device(trans);
trans->state = IWL_TRANS_NO_FW;
}
static inline void iwl_trans_wowlan_suspend(struct iwl_trans *trans)
{
might_sleep();
trans->ops->wowlan_suspend(trans);
}
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
const char *msg)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->wake_any_queue(trans, ctx, msg);
}
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->send_cmd(trans, cmd);
}
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
u8 sta_id, u8 tid)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
}
static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
int tid, int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
status, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
int sta_id, int tid)
{
might_sleep();
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->tx_agg_disable(trans, sta_id, tid);
}
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
int sta_id, int tid)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->tx_agg_alloc(trans, sta_id, tid);
}
static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
int sta_id, int tid,
int frame_limit, u16 ssn)
{
might_sleep();
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
static inline void iwl_trans_free(struct iwl_trans *trans)
{
trans->ops->free(trans);
}
static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
const char *msg)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
trans->ops->stop_queue(trans, q, msg);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->wait_tx_queue_empty(trans);
}
static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
{
if (trans->state != IWL_TRANS_FW_ALIVE)
IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
return trans->ops->check_stuck_queue(trans, q);
}
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
struct dentry *dir)
{
return trans->ops->dbgfs_register(trans, dir);
}
#ifdef CONFIG_PM_SLEEP
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
return trans->ops->suspend(trans);
}
static inline int iwl_trans_resume(struct iwl_trans *trans)
{
return trans->ops->resume(trans);
}
#endif
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);
}
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
trans->ops->write32(trans, ofs, val);
}
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
return trans->ops->read32(trans, ofs);
}
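/*
 * Illustrative sketch only: a read-modify-write of a device register through
 * the accessors above. EXAMPLE_REG_OFS and EXAMPLE_BIT are hypothetical
 * offsets/bits, not real CSR definitions.
 *
 *	static void example_set_bit(struct iwl_trans *trans)
 *	{
 *		u32 val = iwl_trans_read32(trans, EXAMPLE_REG_OFS);
 *
 *		iwl_trans_write32(trans, EXAMPLE_REG_OFS, val | EXAMPLE_BIT);
 *	}
 */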
/*****************************************************
* Transport layers implementations + their allocation function
******************************************************/
struct pci_dev;
struct pci_device_id;
extern const struct iwl_trans_ops trans_ops_pcie;
struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
struct pci_dev *pdev,
const struct pci_device_id *ent);
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
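/*
 * Illustrative sketch only: steps 1) and 2) of the life cycle for the PCIe
 * bus, using the declarations above (error handling elided, probe callback
 * hypothetical).
 *
 *	static int __init example_module_init(void)
 *	{
 *		return iwl_pci_register_driver();
 *	}
 *
 *	// and, from the bus driver's probe callback:
 *	//	trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
 */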
extern const struct iwl_trans_ops trans_ops_idi;
struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
void *pdev_void,
const void *ent_void);
#endif /* __iwl_trans_h__ */