commit     febb02bdfe5e2c6ceaa0a38d8b7afca3d98f415a
tree       f2ea606a30cb11f0d914271b54551627dad26eff
parent     0b4d569de222452bcb55a4a536ade6cf4d8d1e30
parent     32a0f488ce5e8a9a148491f15edc508ab5e8265b
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-03-28 01:33:56 (GMT)
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-03-28 01:33:56 (GMT)

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6: (53 commits)
DVB: firedtv: FireDTV S2 problems with tuning solved
DVB: firedtv: fix printk format mismatch
ieee1394: constify device ID tables
ieee1394: raw1394: add sparse annotations to raw1394_compat_write
ieee1394: Storage class should be before const qualifier
ieee1394: sbp2: follow up on "ieee1394: inherit ud vendor_id from node vendor_id"
firewire: core: optimize propagation of BROADCAST_CHANNEL
firewire: core: simplify broadcast channel allocation
firewire: core: increase bus manager grace period
firewire: core: drop unused call parameters of close_transaction
firewire: cdev: add closure to async stream ioctl
firewire: cdev: simplify FW_CDEV_IOC_SEND_REQUEST return value
firewire: cdev: fix race of ioctl_send_request with bus reset
firewire: cdev: secure add_descriptor ioctl
firewire: cdev: amendment to "add ioctl to query maximum transmission speed"
firewire: broadcast channel support
firewire: implement asynchronous stream transmission
firewire: core: normalize a function argument name
firewire: normalize a variable name
firewire: core: remove condition which is always false
...
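
The cdev-related commits above (broadcast channel support, asynchronous stream transmission, the send_request ioctl changes) extend the /dev/fw* character-device ABI; include/linux/firewire-cdev.h grows by 218 lines in the diffstat below, though that hunk is not reproduced here. As a non-authoritative illustration of the broadcast path only, and assuming the FW_CDEV_IOC_SEND_BROADCAST_REQUEST macro, the struct fw_cdev_send_request field names, and the TCODE_* constants are those exported by the updated <linux/firewire-cdev.h>, a client might issue a broadcast block write roughly as in the sketch below. The kernel side (ioctl_send_broadcast_request in the fw-cdev.c hunks) forces the 0xffff broadcast destination and S100 and rejects offsets below units space.

/*
 * Hypothetical userspace sketch, not part of this merge: submit a
 * broadcast block write through the new broadcast-request ioctl.
 * Macro, struct, and constant names are assumed to come from the
 * updated <linux/firewire-cdev.h>.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static int send_broadcast_write(int fd, __u32 generation,
				__u64 offset, const void *buf, __u32 len)
{
	struct fw_cdev_send_request rq;

	memset(&rq, 0, sizeof(rq));
	rq.tcode      = TCODE_WRITE_BLOCK_REQUEST; /* only writes are accepted */
	rq.offset     = offset;       /* must lie in units space, i.e. above the config ROM */
	rq.data       = (__u64)(unsigned long)buf;
	rq.length     = len;
	rq.generation = generation;   /* taken from the last bus reset event */
	rq.closure    = 0;

	/* Completion is reported later as FW_CDEV_EVENT_RESPONSE on read(). */
	return ioctl(fd, FW_CDEV_IOC_SEND_BROADCAST_REQUEST, &rq);
}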
-rw-r--r--  drivers/firewire/fw-card.c               |  149
-rw-r--r--  drivers/firewire/fw-cdev.c               | 1044
-rw-r--r--  drivers/firewire/fw-device.c             |  203
-rw-r--r--  drivers/firewire/fw-device.h             |   23
-rw-r--r--  drivers/firewire/fw-iso.c                |  227
-rw-r--r--  drivers/firewire/fw-ohci.c               |  260
-rw-r--r--  drivers/firewire/fw-sbp2.c               |   57
-rw-r--r--  drivers/firewire/fw-topology.c           |   29
-rw-r--r--  drivers/firewire/fw-topology.h           |   19
-rw-r--r--  drivers/firewire/fw-transaction.c        |  185
-rw-r--r--  drivers/firewire/fw-transaction.h        |  138
-rw-r--r--  drivers/ieee1394/csr.c                   |    8
-rw-r--r--  drivers/ieee1394/dv1394.c                |    2
-rw-r--r--  drivers/ieee1394/eth1394.c               |    4
-rw-r--r--  drivers/ieee1394/highlevel.c             |    2
-rw-r--r--  drivers/ieee1394/nodemgr.c               |    4
-rw-r--r--  drivers/ieee1394/nodemgr.h               |    2
-rw-r--r--  drivers/ieee1394/raw1394.c               |   14
-rw-r--r--  drivers/ieee1394/sbp2.c                  |    9
-rw-r--r--  drivers/ieee1394/video1394.c             |    2
-rw-r--r--  drivers/media/dvb/firewire/firedtv-avc.c |   10
-rw-r--r--  include/linux/firewire-cdev.h            |  218
22 files changed, 1684 insertions(+), 925 deletions(-)
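
The fw-cdev.c changes below add, among other things, idr-based client resource bookkeeping and the isochronous resource allocation ioctls. A minimal userspace sketch, assuming the FW_CDEV_IOC_GET_SPEED, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, and FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE macros and the fw_cdev_allocate_iso_resource / fw_cdev_event_iso_resource structures exported by the updated <linux/firewire-cdev.h>, shows how the new interface is meant to be driven from a client's point of view:

/*
 * Hypothetical userspace sketch, not part of this merge: reserve an
 * isochronous channel plus bandwidth at the IRM via the new ioctls and
 * pick the result up as an event.  The macro and structure names are
 * assumed to be those exported by the updated <linux/firewire-cdev.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static int reserve_iso_resource(const char *devnode)
{
	struct fw_cdev_allocate_iso_resource req;
	struct fw_cdev_event_iso_resource ev;
	int fd, speed;

	fd = open(devnode, O_RDWR);	/* e.g. "/dev/fw0" */
	if (fd < 0)
		return -1;

	/* Speed code of the path to this node, added by this ABI update. */
	speed = ioctl(fd, FW_CDEV_IOC_GET_SPEED);

	memset(&req, 0, sizeof(req));
	req.channels  = 1ULL << 13;	/* bit mask of acceptable channels */
	req.bandwidth = 2400;		/* isochronous bandwidth allocation units */
	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0) {
		close(fd);
		return -1;
	}

	/* The (re)allocation result arrives asynchronously on read(). */
	if (read(fd, &ev, sizeof(ev)) > 0 &&
	    ev.type == FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED)
		printf("speed %d, channel %d, bandwidth %d, handle %u\n",
		       speed, ev.channel, ev.bandwidth, ev.handle);

	/*
	 * The kernel reallocates the resource after each bus reset and
	 * releases it on FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE or close().
	 */
	close(fd);
	return 0;
}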
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index a5dd7a6..8b8c8c2 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c @@ -63,8 +63,7 @@ static int descriptor_count; #define BIB_CMC ((1) << 30) #define BIB_IMC ((1) << 31) -static u32 * -generate_config_rom(struct fw_card *card, size_t *config_rom_length) +static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length) { struct fw_descriptor *desc; static u32 config_rom[256]; @@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length) return config_rom; } -static void -update_config_roms(void) +static void update_config_roms(void) { struct fw_card *card; u32 *config_rom; @@ -141,8 +139,7 @@ update_config_roms(void) } } -int -fw_core_add_descriptor(struct fw_descriptor *desc) +int fw_core_add_descriptor(struct fw_descriptor *desc) { size_t i; @@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc) return 0; } -void -fw_core_remove_descriptor(struct fw_descriptor *desc) +void fw_core_remove_descriptor(struct fw_descriptor *desc) { mutex_lock(&card_mutex); @@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc) mutex_unlock(&card_mutex); } +static int set_broadcast_channel(struct device *dev, void *data) +{ + fw_device_set_broadcast_channel(fw_device(dev), (long)data); + return 0; +} + +static void allocate_broadcast_channel(struct fw_card *card, int generation) +{ + int channel, bandwidth = 0; + + fw_iso_resource_manage(card, generation, 1ULL << 31, + &channel, &bandwidth, true); + if (channel == 31) { + card->broadcast_channel_allocated = true; + device_for_each_child(card->device, (void *)(long)generation, + set_broadcast_channel); + } +} + static const char gap_count_table[] = { 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40 }; -void -fw_schedule_bm_work(struct fw_card *card, unsigned long delay) +void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) { int scheduled; @@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay) fw_card_put(card); } -static void -fw_card_bm_work(struct work_struct *work) +static void fw_card_bm_work(struct work_struct *work) { struct fw_card *card = container_of(work, struct fw_card, work.work); struct fw_device *root_device; - struct fw_node *root_node, *local_node; + struct fw_node *root_node; unsigned long flags; - int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode; + int root_id, new_root_id, irm_id, local_id; + int gap_count, generation, grace, rcode; bool do_reset = false; bool root_device_is_running; bool root_device_is_cmc; __be32 lock_data[2]; spin_lock_irqsave(&card->lock, flags); - local_node = card->local_node; - root_node = card->root_node; - if (local_node == NULL) { + if (card->local_node == NULL) { spin_unlock_irqrestore(&card->lock, flags); goto out_put_card; } - fw_node_get(local_node); - fw_node_get(root_node); generation = card->generation; + root_node = card->root_node; + fw_node_get(root_node); root_device = root_node->data; root_device_is_running = root_device && atomic_read(&root_device->state) == FW_DEVICE_RUNNING; root_device_is_cmc = root_device && root_device->cmc; - root_id = root_node->node_id; - grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); + root_id = root_node->node_id; + irm_id = card->irm_node->node_id; + local_id = card->local_node->node_id; + + grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8)); if (is_next_generation(generation, 
card->bm_generation) || (card->bm_generation != generation && grace)) { @@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work) * next generation. */ - irm_id = card->irm_node->node_id; if (!card->irm_node->link_on) { - new_root_id = local_node->node_id; + new_root_id = local_id; fw_notify("IRM has link off, making local node (%02x) root.\n", new_root_id); goto pick_me; } lock_data[0] = cpu_to_be32(0x3f); - lock_data[1] = cpu_to_be32(local_node->node_id); + lock_data[1] = cpu_to_be32(local_id); spin_unlock_irqrestore(&card->lock, flags); @@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work) goto out; if (rcode == RCODE_COMPLETE && - lock_data[0] != cpu_to_be32(0x3f)) - /* Somebody else is BM, let them do the work. */ + lock_data[0] != cpu_to_be32(0x3f)) { + + /* Somebody else is BM. Only act as IRM. */ + if (local_id == irm_id) + allocate_broadcast_channel(card, generation); + goto out; + } spin_lock_irqsave(&card->lock, flags); @@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work) * do a bus reset and pick the local node as * root, and thus, IRM. */ - new_root_id = local_node->node_id; + new_root_id = local_id; fw_notify("BM lock failed, making local node (%02x) root.\n", new_root_id); goto pick_me; } } else if (card->bm_generation != generation) { /* - * OK, we weren't BM in the last generation, and it's - * less than 100ms since last bus reset. Reschedule - * this task 100ms from now. + * We weren't BM in the last generation, and the last + * bus reset is less than 125ms ago. Reschedule this job. */ spin_unlock_irqrestore(&card->lock, flags); - fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10)); + fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8)); goto out; } @@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work) * Either link_on is false, or we failed to read the * config rom. In either case, pick another root. */ - new_root_id = local_node->node_id; + new_root_id = local_id; } else if (!root_device_is_running) { /* * If we haven't probed this device yet, bail out now @@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work) * successfully read the config rom, but it's not * cycle master capable. */ - new_root_id = local_node->node_id; + new_root_id = local_id; } pick_me: @@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work) card->index, new_root_id, gap_count); fw_send_phy_config(card, new_root_id, generation, gap_count); fw_core_initiate_bus_reset(card, 1); + /* Will allocate broadcast channel after the reset. 
*/ + } else { + if (local_id == irm_id) + allocate_broadcast_channel(card, generation); } + out: fw_node_put(root_node); - fw_node_put(local_node); out_put_card: fw_card_put(card); } -static void -flush_timer_callback(unsigned long data) +static void flush_timer_callback(unsigned long data) { struct fw_card *card = (struct fw_card *)data; fw_flush_transactions(card); } -void -fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, - struct device *device) +void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, + struct device *device) { static atomic_t index = ATOMIC_INIT(-1); @@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, } EXPORT_SYMBOL(fw_card_initialize); -int -fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid) +int fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid) { u32 *config_rom; size_t length; - int err; + int ret; card->max_receive = max_receive; card->link_speed = link_speed; @@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card, list_add_tail(&card->link, &card_list); mutex_unlock(&card_mutex); - err = card->driver->enable(card, config_rom, length); - if (err < 0) { + ret = card->driver->enable(card, config_rom, length); + if (ret < 0) { mutex_lock(&card_mutex); list_del(&card->link); mutex_unlock(&card_mutex); } - return err; + + return ret; } EXPORT_SYMBOL(fw_card_add); @@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add); * dummy driver just fails all IO. */ -static int -dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) +static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length) { BUG(); return -1; } -static int -dummy_update_phy_reg(struct fw_card *card, int address, - int clear_bits, int set_bits) +static int dummy_update_phy_reg(struct fw_card *card, int address, + int clear_bits, int set_bits) { return -ENODEV; } -static int -dummy_set_config_rom(struct fw_card *card, - u32 *config_rom, size_t length) +static int dummy_set_config_rom(struct fw_card *card, + u32 *config_rom, size_t length) { /* * We take the card out of card_list before setting the dummy @@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card, return -1; } -static void -dummy_send_request(struct fw_card *card, struct fw_packet *packet) +static void dummy_send_request(struct fw_card *card, struct fw_packet *packet) { packet->callback(packet, card, -ENODEV); } -static void -dummy_send_response(struct fw_card *card, struct fw_packet *packet) +static void dummy_send_response(struct fw_card *card, struct fw_packet *packet) { packet->callback(packet, card, -ENODEV); } -static int -dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) +static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet) { return -ENOENT; } -static int -dummy_enable_phys_dma(struct fw_card *card, - int node_id, int generation) +static int dummy_enable_phys_dma(struct fw_card *card, + int node_id, int generation) { return -ENODEV; } @@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = { .enable_phys_dma = dummy_enable_phys_dma, }; -void -fw_card_release(struct kref *kref) +void fw_card_release(struct kref *kref) { struct fw_card *card = container_of(kref, struct fw_card, kref); complete(&card->done); } -void -fw_core_remove_card(struct fw_card *card) +void fw_core_remove_card(struct fw_card *card) { card->driver->update_phy_reg(card, 4, PHY_LINK_ACTIVE | PHY_CONTENDER, 0); @@ -536,8 +548,7 @@ 
fw_core_remove_card(struct fw_card *card) } EXPORT_SYMBOL(fw_core_remove_card); -int -fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) +int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset) { int reg = short_reset ? 5 : 1; int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET; diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index ed03234..7eb6594 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c @@ -18,87 +18,162 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/wait.h> -#include <linux/errno.h> +#include <linux/compat.h> +#include <linux/delay.h> #include <linux/device.h> -#include <linux/vmalloc.h> +#include <linux/errno.h> +#include <linux/firewire-cdev.h> +#include <linux/idr.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> #include <linux/poll.h> #include <linux/preempt.h> +#include <linux/spinlock.h> #include <linux/time.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/idr.h> -#include <linux/compat.h> -#include <linux/firewire-cdev.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + #include <asm/system.h> #include <asm/uaccess.h> -#include "fw-transaction.h" -#include "fw-topology.h" + #include "fw-device.h" +#include "fw-topology.h" +#include "fw-transaction.h" + +struct client { + u32 version; + struct fw_device *device; + + spinlock_t lock; + bool in_shutdown; + struct idr resource_idr; + struct list_head event_list; + wait_queue_head_t wait; + u64 bus_reset_closure; + + struct fw_iso_context *iso_context; + u64 iso_closure; + struct fw_iso_buffer buffer; + unsigned long vm_start; -struct client; -struct client_resource { struct list_head link; - void (*release)(struct client *client, struct client_resource *r); - u32 handle; + struct kref kref; }; +static inline void client_get(struct client *client) +{ + kref_get(&client->kref); +} + +static void client_release(struct kref *kref) +{ + struct client *client = container_of(kref, struct client, kref); + + fw_device_put(client->device); + kfree(client); +} + +static void client_put(struct client *client) +{ + kref_put(&client->kref, client_release); +} + +struct client_resource; +typedef void (*client_resource_release_fn_t)(struct client *, + struct client_resource *); +struct client_resource { + client_resource_release_fn_t release; + int handle; +}; + +struct address_handler_resource { + struct client_resource resource; + struct fw_address_handler handler; + __u64 closure; + struct client *client; +}; + +struct outbound_transaction_resource { + struct client_resource resource; + struct fw_transaction transaction; +}; + +struct inbound_transaction_resource { + struct client_resource resource; + struct fw_request *request; + void *data; + size_t length; +}; + +struct descriptor_resource { + struct client_resource resource; + struct fw_descriptor descriptor; + u32 data[0]; +}; + +struct iso_resource { + struct client_resource resource; + struct client *client; + /* Schedule work and access todo only with client->lock held. 
*/ + struct delayed_work work; + enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, + ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; + int generation; + u64 channels; + s32 bandwidth; + struct iso_resource_event *e_alloc, *e_dealloc; +}; + +static void schedule_iso_resource(struct iso_resource *); +static void release_iso_resource(struct client *, struct client_resource *); + /* * dequeue_event() just kfree()'s the event, so the event has to be - * the first field in the struct. + * the first field in a struct XYZ_event. */ - struct event { struct { void *data; size_t size; } v[2]; struct list_head link; }; -struct bus_reset { +struct bus_reset_event { struct event event; struct fw_cdev_event_bus_reset reset; }; -struct response { +struct outbound_transaction_event { struct event event; - struct fw_transaction transaction; struct client *client; - struct client_resource resource; + struct outbound_transaction_resource r; struct fw_cdev_event_response response; }; -struct iso_interrupt { +struct inbound_transaction_event { struct event event; - struct fw_cdev_event_iso_interrupt interrupt; + struct fw_cdev_event_request request; }; -struct client { - u32 version; - struct fw_device *device; - spinlock_t lock; - u32 resource_handle; - struct list_head resource_list; - struct list_head event_list; - wait_queue_head_t wait; - u64 bus_reset_closure; - - struct fw_iso_context *iso_context; - u64 iso_closure; - struct fw_iso_buffer buffer; - unsigned long vm_start; +struct iso_interrupt_event { + struct event event; + struct fw_cdev_event_iso_interrupt interrupt; +}; - struct list_head link; +struct iso_resource_event { + struct event event; + struct fw_cdev_event_iso_resource resource; }; -static inline void __user * -u64_to_uptr(__u64 value) +static inline void __user *u64_to_uptr(__u64 value) { return (void __user *)(unsigned long)value; } -static inline __u64 -uptr_to_u64(void __user *ptr) +static inline __u64 uptr_to_u64(void __user *ptr) { return (__u64)(unsigned long)ptr; } @@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file) { struct fw_device *device; struct client *client; - unsigned long flags; device = fw_device_get_by_devt(inode->i_rdev); if (device == NULL) @@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) } client->device = device; - INIT_LIST_HEAD(&client->event_list); - INIT_LIST_HEAD(&client->resource_list); spin_lock_init(&client->lock); + idr_init(&client->resource_idr); + INIT_LIST_HEAD(&client->event_list); init_waitqueue_head(&client->wait); + kref_init(&client->kref); file->private_data = client; - spin_lock_irqsave(&device->card->lock, flags); + mutex_lock(&device->client_list_mutex); list_add_tail(&client->link, &device->client_list); - spin_unlock_irqrestore(&device->card->lock, flags); + mutex_unlock(&device->client_list_mutex); return 0; } @@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event, event->v[1].size = size1; spin_lock_irqsave(&client->lock, flags); - list_add_tail(&event->link, &client->event_list); + if (client->in_shutdown) + kfree(event); + else + list_add_tail(&event->link, &client->event_list); spin_unlock_irqrestore(&client->lock, flags); wake_up_interruptible(&client->wait); } -static int -dequeue_event(struct client *client, char __user *buffer, size_t count) +static int dequeue_event(struct client *client, + char __user *buffer, size_t count) { - unsigned long flags; struct event *event; size_t size, total; - int i, retval; + int i, 
ret; - retval = wait_event_interruptible(client->wait, - !list_empty(&client->event_list) || - fw_device_is_shutdown(client->device)); - if (retval < 0) - return retval; + ret = wait_event_interruptible(client->wait, + !list_empty(&client->event_list) || + fw_device_is_shutdown(client->device)); + if (ret < 0) + return ret; if (list_empty(&client->event_list) && fw_device_is_shutdown(client->device)) return -ENODEV; - spin_lock_irqsave(&client->lock, flags); - event = container_of(client->event_list.next, struct event, link); + spin_lock_irq(&client->lock); + event = list_first_entry(&client->event_list, struct event, link); list_del(&event->link); - spin_unlock_irqrestore(&client->lock, flags); + spin_unlock_irq(&client->lock); total = 0; for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { size = min(event->v[i].size, count - total); if (copy_to_user(buffer + total, event->v[i].data, size)) { - retval = -EFAULT; + ret = -EFAULT; goto out; } total += size; } - retval = total; + ret = total; out: kfree(event); - return retval; + return ret; } -static ssize_t -fw_device_op_read(struct file *file, - char __user *buffer, size_t count, loff_t *offset) +static ssize_t fw_device_op_read(struct file *file, char __user *buffer, + size_t count, loff_t *offset) { struct client *client = file->private_data; return dequeue_event(client, buffer, count); } -/* caller must hold card->lock so that node pointers can be dereferenced here */ -static void -fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, - struct client *client) +static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, + struct client *client) { struct fw_card *card = client->device->card; + spin_lock_irq(&card->lock); + event->closure = client->bus_reset_closure; event->type = FW_CDEV_EVENT_BUS_RESET; event->generation = client->device->generation; @@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, event->bm_node_id = 0; /* FIXME: We don't track the BM. 
*/ event->irm_node_id = card->irm_node->node_id; event->root_node_id = card->root_node->node_id; + + spin_unlock_irq(&card->lock); } -static void -for_each_client(struct fw_device *device, - void (*callback)(struct client *client)) +static void for_each_client(struct fw_device *device, + void (*callback)(struct client *client)) { - struct fw_card *card = device->card; struct client *c; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); + mutex_lock(&device->client_list_mutex); list_for_each_entry(c, &device->client_list, link) callback(c); + mutex_unlock(&device->client_list_mutex); +} + +static int schedule_reallocations(int id, void *p, void *data) +{ + struct client_resource *r = p; - spin_unlock_irqrestore(&card->lock, flags); + if (r->release == release_iso_resource) + schedule_iso_resource(container_of(r, + struct iso_resource, resource)); + return 0; } -static void -queue_bus_reset_event(struct client *client) +static void queue_bus_reset_event(struct client *client) { - struct bus_reset *bus_reset; + struct bus_reset_event *e; - bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC); - if (bus_reset == NULL) { + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (e == NULL) { fw_notify("Out of memory when allocating bus reset event\n"); return; } - fill_bus_reset_event(&bus_reset->reset, client); + fill_bus_reset_event(&e->reset, client); + + queue_event(client, &e->event, + &e->reset, sizeof(e->reset), NULL, 0); - queue_event(client, &bus_reset->event, - &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0); + spin_lock_irq(&client->lock); + idr_for_each(&client->resource_idr, schedule_reallocations, client); + spin_unlock_irq(&client->lock); } void fw_device_cdev_update(struct fw_device *device) @@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer) { struct fw_cdev_get_info *get_info = buffer; struct fw_cdev_event_bus_reset bus_reset; - struct fw_card *card = client->device->card; unsigned long ret = 0; client->version = get_info->version; get_info->version = FW_CDEV_VERSION; + get_info->card = client->device->card->index; down_read(&fw_device_rwsem); @@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer) client->bus_reset_closure = get_info->bus_reset_closure; if (get_info->bus_reset != 0) { void __user *uptr = u64_to_uptr(get_info->bus_reset); - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); fill_bus_reset_event(&bus_reset, client); - spin_unlock_irqrestore(&card->lock, flags); - if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) return -EFAULT; } - get_info->card = card->index; - return 0; } -static void -add_client_resource(struct client *client, struct client_resource *resource) +static int add_client_resource(struct client *client, + struct client_resource *resource, gfp_t gfp_mask) { unsigned long flags; + int ret; + + retry: + if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) + return -ENOMEM; spin_lock_irqsave(&client->lock, flags); - list_add_tail(&resource->link, &client->resource_list); - resource->handle = client->resource_handle++; + if (client->in_shutdown) + ret = -ECANCELED; + else + ret = idr_get_new(&client->resource_idr, resource, + &resource->handle); + if (ret >= 0) { + client_get(client); + if (resource->release == release_iso_resource) + schedule_iso_resource(container_of(resource, + struct iso_resource, resource)); + } spin_unlock_irqrestore(&client->lock, flags); + + if (ret == -EAGAIN) + goto retry; + + return ret < 0 ? 
ret : 0; } -static int -release_client_resource(struct client *client, u32 handle, - struct client_resource **resource) +static int release_client_resource(struct client *client, u32 handle, + client_resource_release_fn_t release, + struct client_resource **resource) { struct client_resource *r; - unsigned long flags; - spin_lock_irqsave(&client->lock, flags); - list_for_each_entry(r, &client->resource_list, link) { - if (r->handle == handle) { - list_del(&r->link); - break; - } - } - spin_unlock_irqrestore(&client->lock, flags); + spin_lock_irq(&client->lock); + if (client->in_shutdown) + r = NULL; + else + r = idr_find(&client->resource_idr, handle); + if (r && r->release == release) + idr_remove(&client->resource_idr, handle); + spin_unlock_irq(&client->lock); - if (&r->link == &client->resource_list) + if (!(r && r->release == release)) return -EINVAL; if (resource) @@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle, else r->release(client, r); + client_put(client); + return 0; } -static void -release_transaction(struct client *client, struct client_resource *resource) +static void release_transaction(struct client *client, + struct client_resource *resource) { - struct response *response = - container_of(resource, struct response, resource); + struct outbound_transaction_resource *r = container_of(resource, + struct outbound_transaction_resource, resource); - fw_cancel_transaction(client->device->card, &response->transaction); + fw_cancel_transaction(client->device->card, &r->transaction); } -static void -complete_transaction(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) +static void complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) { - struct response *response = data; - struct client *client = response->client; + struct outbound_transaction_event *e = data; + struct fw_cdev_event_response *rsp = &e->response; + struct client *client = e->client; unsigned long flags; - struct fw_cdev_event_response *r = &response->response; - if (length < r->length) - r->length = length; + if (length < rsp->length) + rsp->length = length; if (rcode == RCODE_COMPLETE) - memcpy(r->data, payload, r->length); + memcpy(rsp->data, payload, rsp->length); spin_lock_irqsave(&client->lock, flags); - list_del(&response->resource.link); + /* + * 1. If called while in shutdown, the idr tree must be left untouched. + * The idr handle will be removed and the client reference will be + * dropped later. + * 2. If the call chain was release_client_resource -> + * release_transaction -> complete_transaction (instead of a normal + * conclusion of the transaction), i.e. if this resource was already + * unregistered from the idr, the client reference will be dropped + * by release_client_resource and we must not drop it here. + */ + if (!client->in_shutdown && + idr_find(&client->resource_idr, e->r.resource.handle)) { + idr_remove(&client->resource_idr, e->r.resource.handle); + /* Drop the idr's reference */ + client_put(client); + } spin_unlock_irqrestore(&client->lock, flags); - r->type = FW_CDEV_EVENT_RESPONSE; - r->rcode = rcode; + rsp->type = FW_CDEV_EVENT_RESPONSE; + rsp->rcode = rcode; /* - * In the case that sizeof(*r) doesn't align with the position of the + * In the case that sizeof(*rsp) doesn't align with the position of the * data, and the read is short, preserve an extra copy of the data * to stay compatible with a pre-2.6.27 bug. 
Since the bug is harmless * for short reads and some apps depended on it, this is both safe * and prudent for compatibility. */ - if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) - queue_event(client, &response->event, r, sizeof(*r), - r->data, r->length); + if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) + queue_event(client, &e->event, rsp, sizeof(*rsp), + rsp->data, rsp->length); else - queue_event(client, &response->event, r, sizeof(*r) + r->length, + queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0); + + /* Drop the transaction callback's reference */ + client_put(client); } -static int ioctl_send_request(struct client *client, void *buffer) +static int init_request(struct client *client, + struct fw_cdev_send_request *request, + int destination_id, int speed) { - struct fw_device *device = client->device; - struct fw_cdev_send_request *request = buffer; - struct response *response; + struct outbound_transaction_event *e; + int ret; - /* What is the biggest size we'll accept, really? */ - if (request->length > 4096) - return -EINVAL; + if (request->tcode != TCODE_STREAM_DATA && + (request->length > 4096 || request->length > 512 << speed)) + return -EIO; - response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); - if (response == NULL) + e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL); + if (e == NULL) return -ENOMEM; - response->client = client; - response->response.length = request->length; - response->response.closure = request->closure; + e->client = client; + e->response.length = request->length; + e->response.closure = request->closure; if (request->data && - copy_from_user(response->response.data, + copy_from_user(e->response.data, u64_to_uptr(request->data), request->length)) { - kfree(response); - return -EFAULT; + ret = -EFAULT; + goto failed; } - response->resource.release = release_transaction; - add_client_resource(client, &response->resource); + e->r.resource.release = release_transaction; + ret = add_client_resource(client, &e->r.resource, GFP_KERNEL); + if (ret < 0) + goto failed; - fw_send_request(device->card, &response->transaction, - request->tcode & 0x1f, - device->node->node_id, - request->generation, - device->max_speed, - request->offset, - response->response.data, request->length, - complete_transaction, response); + /* Get a reference for the transaction callback */ + client_get(client); - if (request->data) - return sizeof(request) + request->length; - else - return sizeof(request); + fw_send_request(client->device->card, &e->r.transaction, + request->tcode, destination_id, request->generation, + speed, request->offset, e->response.data, + request->length, complete_transaction, e); + return 0; + + failed: + kfree(e); + + return ret; } -struct address_handler { - struct fw_address_handler handler; - __u64 closure; - struct client *client; - struct client_resource resource; -}; +static int ioctl_send_request(struct client *client, void *buffer) +{ + struct fw_cdev_send_request *request = buffer; -struct request { - struct fw_request *request; - void *data; - size_t length; - struct client_resource resource; -}; + switch (request->tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + case TCODE_READ_QUADLET_REQUEST: + case TCODE_READ_BLOCK_REQUEST: + case TCODE_LOCK_MASK_SWAP: + case TCODE_LOCK_COMPARE_SWAP: + case TCODE_LOCK_FETCH_ADD: + case TCODE_LOCK_LITTLE_ADD: + case TCODE_LOCK_BOUNDED_ADD: + case TCODE_LOCK_WRAP_ADD: + case TCODE_LOCK_VENDOR_DEPENDENT: + break; + 
default: + return -EINVAL; + } -struct request_event { - struct event event; - struct fw_cdev_event_request request; -}; + return init_request(client, request, client->device->node_id, + client->device->max_speed); +} -static void -release_request(struct client *client, struct client_resource *resource) +static void release_request(struct client *client, + struct client_resource *resource) { - struct request *request = - container_of(resource, struct request, resource); + struct inbound_transaction_resource *r = container_of(resource, + struct inbound_transaction_resource, resource); - fw_send_response(client->device->card, request->request, + fw_send_response(client->device->card, r->request, RCODE_CONFLICT_ERROR); - kfree(request); + kfree(r); } -static void -handle_request(struct fw_card *card, struct fw_request *r, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void handle_request(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) { - struct address_handler *handler = callback_data; - struct request *request; - struct request_event *e; - struct client *client = handler->client; + struct address_handler_resource *handler = callback_data; + struct inbound_transaction_resource *r; + struct inbound_transaction_event *e; + int ret; - request = kmalloc(sizeof(*request), GFP_ATOMIC); + r = kmalloc(sizeof(*r), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (request == NULL || e == NULL) { - kfree(request); - kfree(e); - fw_send_response(card, r, RCODE_CONFLICT_ERROR); - return; - } + if (r == NULL || e == NULL) + goto failed; - request->request = r; - request->data = payload; - request->length = length; + r->request = request; + r->data = payload; + r->length = length; - request->resource.release = release_request; - add_client_resource(client, &request->resource); + r->resource.release = release_request; + ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC); + if (ret < 0) + goto failed; e->request.type = FW_CDEV_EVENT_REQUEST; e->request.tcode = tcode; e->request.offset = offset; e->request.length = length; - e->request.handle = request->resource.handle; + e->request.handle = r->resource.handle; e->request.closure = handler->closure; - queue_event(client, &e->event, + queue_event(handler->client, &e->event, &e->request, sizeof(e->request), payload, length); + return; + + failed: + kfree(r); + kfree(e); + fw_send_response(card, request, RCODE_CONFLICT_ERROR); } -static void -release_address_handler(struct client *client, - struct client_resource *resource) +static void release_address_handler(struct client *client, + struct client_resource *resource) { - struct address_handler *handler = - container_of(resource, struct address_handler, resource); + struct address_handler_resource *r = + container_of(resource, struct address_handler_resource, resource); - fw_core_remove_address_handler(&handler->handler); - kfree(handler); + fw_core_remove_address_handler(&r->handler); + kfree(r); } static int ioctl_allocate(struct client *client, void *buffer) { struct fw_cdev_allocate *request = buffer; - struct address_handler *handler; + struct address_handler_resource *r; struct fw_address_region region; + int ret; - handler = kmalloc(sizeof(*handler), GFP_KERNEL); - if (handler == NULL) + r = kmalloc(sizeof(*r), 
GFP_KERNEL); + if (r == NULL) return -ENOMEM; region.start = request->offset; region.end = request->offset + request->length; - handler->handler.length = request->length; - handler->handler.address_callback = handle_request; - handler->handler.callback_data = handler; - handler->closure = request->closure; - handler->client = client; - - if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) { - kfree(handler); - return -EBUSY; + r->handler.length = request->length; + r->handler.address_callback = handle_request; + r->handler.callback_data = r; + r->closure = request->closure; + r->client = client; + + ret = fw_core_add_address_handler(&r->handler, ®ion); + if (ret < 0) { + kfree(r); + return ret; } - handler->resource.release = release_address_handler; - add_client_resource(client, &handler->resource); - request->handle = handler->resource.handle; + r->resource.release = release_address_handler; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) { + release_address_handler(client, &r->resource); + return ret; + } + request->handle = r->resource.handle; return 0; } @@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer) { struct fw_cdev_deallocate *request = buffer; - return release_client_resource(client, request->handle, NULL); + return release_client_resource(client, request->handle, + release_address_handler, NULL); } static int ioctl_send_response(struct client *client, void *buffer) { struct fw_cdev_send_response *request = buffer; struct client_resource *resource; - struct request *r; + struct inbound_transaction_resource *r; - if (release_client_resource(client, request->handle, &resource) < 0) + if (release_client_resource(client, request->handle, + release_request, &resource) < 0) return -EINVAL; - r = container_of(resource, struct request, resource); + + r = container_of(resource, struct inbound_transaction_resource, + resource); if (request->length < r->length) r->length = request->length; if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) @@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer) return fw_core_initiate_bus_reset(client->device->card, short_reset); } -struct descriptor { - struct fw_descriptor d; - struct client_resource resource; - u32 data[0]; -}; - static void release_descriptor(struct client *client, struct client_resource *resource) { - struct descriptor *descriptor = - container_of(resource, struct descriptor, resource); + struct descriptor_resource *r = + container_of(resource, struct descriptor_resource, resource); - fw_core_remove_descriptor(&descriptor->d); - kfree(descriptor); + fw_core_remove_descriptor(&r->descriptor); + kfree(r); } static int ioctl_add_descriptor(struct client *client, void *buffer) { struct fw_cdev_add_descriptor *request = buffer; - struct descriptor *descriptor; - int retval; + struct fw_card *card = client->device->card; + struct descriptor_resource *r; + int ret; + + /* Access policy: Allow this ioctl only on local nodes' device files. 
*/ + spin_lock_irq(&card->lock); + ret = client->device->node_id != card->local_node->node_id; + spin_unlock_irq(&card->lock); + if (ret) + return -ENOSYS; if (request->length > 256) return -EINVAL; - descriptor = - kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); - if (descriptor == NULL) + r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); + if (r == NULL) return -ENOMEM; - if (copy_from_user(descriptor->data, + if (copy_from_user(r->data, u64_to_uptr(request->data), request->length * 4)) { - kfree(descriptor); - return -EFAULT; + ret = -EFAULT; + goto failed; } - descriptor->d.length = request->length; - descriptor->d.immediate = request->immediate; - descriptor->d.key = request->key; - descriptor->d.data = descriptor->data; + r->descriptor.length = request->length; + r->descriptor.immediate = request->immediate; + r->descriptor.key = request->key; + r->descriptor.data = r->data; - retval = fw_core_add_descriptor(&descriptor->d); - if (retval < 0) { - kfree(descriptor); - return retval; - } + ret = fw_core_add_descriptor(&r->descriptor); + if (ret < 0) + goto failed; - descriptor->resource.release = release_descriptor; - add_client_resource(client, &descriptor->resource); - request->handle = descriptor->resource.handle; + r->resource.release = release_descriptor; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) { + fw_core_remove_descriptor(&r->descriptor); + goto failed; + } + request->handle = r->resource.handle; return 0; + failed: + kfree(r); + + return ret; } static int ioctl_remove_descriptor(struct client *client, void *buffer) { struct fw_cdev_remove_descriptor *request = buffer; - return release_client_resource(client, request->handle, NULL); + return release_client_resource(client, request->handle, + release_descriptor, NULL); } -static void -iso_callback(struct fw_iso_context *context, u32 cycle, - size_t header_length, void *header, void *data) +static void iso_callback(struct fw_iso_context *context, u32 cycle, + size_t header_length, void *header, void *data) { struct client *client = data; - struct iso_interrupt *irq; + struct iso_interrupt_event *e; - irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); - if (irq == NULL) + e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); + if (e == NULL) return; - irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; - irq->interrupt.closure = client->iso_closure; - irq->interrupt.cycle = cycle; - irq->interrupt.header_length = header_length; - memcpy(irq->interrupt.header, header, header_length); - queue_event(client, &irq->event, &irq->interrupt, - sizeof(irq->interrupt) + header_length, NULL, 0); + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; + e->interrupt.closure = client->iso_closure; + e->interrupt.cycle = cycle; + e->interrupt.header_length = header_length; + memcpy(e->interrupt.header, header, header_length); + queue_event(client, &e->event, &e->interrupt, + sizeof(e->interrupt) + header_length, NULL, 0); } static int ioctl_create_iso_context(struct client *client, void *buffer) @@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer) return 0; } +static void iso_resource_work(struct work_struct *work) +{ + struct iso_resource_event *e; + struct iso_resource *r = + container_of(work, struct iso_resource, work.work); + struct client *client = r->client; + int generation, channel, bandwidth, todo; + bool skip, free, success; + + spin_lock_irq(&client->lock); + generation = client->device->generation; + todo = r->todo; + /* Allow 
1000ms grace period for other reallocations. */ + if (todo == ISO_RES_ALLOC && + time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) { + if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3))) + client_get(client); + skip = true; + } else { + /* We could be called twice within the same generation. */ + skip = todo == ISO_RES_REALLOC && + r->generation == generation; + } + free = todo == ISO_RES_DEALLOC || + todo == ISO_RES_ALLOC_ONCE || + todo == ISO_RES_DEALLOC_ONCE; + r->generation = generation; + spin_unlock_irq(&client->lock); + + if (skip) + goto out; + + bandwidth = r->bandwidth; + + fw_iso_resource_manage(client->device->card, generation, + r->channels, &channel, &bandwidth, + todo == ISO_RES_ALLOC || + todo == ISO_RES_REALLOC || + todo == ISO_RES_ALLOC_ONCE); + /* + * Is this generation outdated already? As long as this resource sticks + * in the idr, it will be scheduled again for a newer generation or at + * shutdown. + */ + if (channel == -EAGAIN && + (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC)) + goto out; + + success = channel >= 0 || bandwidth > 0; + + spin_lock_irq(&client->lock); + /* + * Transit from allocation to reallocation, except if the client + * requested deallocation in the meantime. + */ + if (r->todo == ISO_RES_ALLOC) + r->todo = ISO_RES_REALLOC; + /* + * Allocation or reallocation failure? Pull this resource out of the + * idr and prepare for deletion, unless the client is shutting down. + */ + if (r->todo == ISO_RES_REALLOC && !success && + !client->in_shutdown && + idr_find(&client->resource_idr, r->resource.handle)) { + idr_remove(&client->resource_idr, r->resource.handle); + client_put(client); + free = true; + } + spin_unlock_irq(&client->lock); + + if (todo == ISO_RES_ALLOC && channel >= 0) + r->channels = 1ULL << channel; + + if (todo == ISO_RES_REALLOC && success) + goto out; + + if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) { + e = r->e_alloc; + r->e_alloc = NULL; + } else { + e = r->e_dealloc; + r->e_dealloc = NULL; + } + e->resource.handle = r->resource.handle; + e->resource.channel = channel; + e->resource.bandwidth = bandwidth; + + queue_event(client, &e->event, + &e->resource, sizeof(e->resource), NULL, 0); + + if (free) { + cancel_delayed_work(&r->work); + kfree(r->e_alloc); + kfree(r->e_dealloc); + kfree(r); + } + out: + client_put(client); +} + +static void schedule_iso_resource(struct iso_resource *r) +{ + client_get(r->client); + if (!schedule_delayed_work(&r->work, 0)) + client_put(r->client); +} + +static void release_iso_resource(struct client *client, + struct client_resource *resource) +{ + struct iso_resource *r = + container_of(resource, struct iso_resource, resource); + + spin_lock_irq(&client->lock); + r->todo = ISO_RES_DEALLOC; + schedule_iso_resource(r); + spin_unlock_irq(&client->lock); +} + +static int init_iso_resource(struct client *client, + struct fw_cdev_allocate_iso_resource *request, int todo) +{ + struct iso_resource_event *e1, *e2; + struct iso_resource *r; + int ret; + + if ((request->channels == 0 && request->bandwidth == 0) || + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || + request->bandwidth < 0) + return -EINVAL; + + r = kmalloc(sizeof(*r), GFP_KERNEL); + e1 = kmalloc(sizeof(*e1), GFP_KERNEL); + e2 = kmalloc(sizeof(*e2), GFP_KERNEL); + if (r == NULL || e1 == NULL || e2 == NULL) { + ret = -ENOMEM; + goto fail; + } + + INIT_DELAYED_WORK(&r->work, iso_resource_work); + r->client = client; + r->todo = todo; + r->generation = -1; + r->channels = request->channels; + r->bandwidth = 
request->bandwidth; + r->e_alloc = e1; + r->e_dealloc = e2; + + e1->resource.closure = request->closure; + e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED; + e2->resource.closure = request->closure; + e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED; + + if (todo == ISO_RES_ALLOC) { + r->resource.release = release_iso_resource; + ret = add_client_resource(client, &r->resource, GFP_KERNEL); + if (ret < 0) + goto fail; + } else { + r->resource.release = NULL; + r->resource.handle = -1; + schedule_iso_resource(r); + } + request->handle = r->resource.handle; + + return 0; + fail: + kfree(r); + kfree(e1); + kfree(e2); + + return ret; +} + +static int ioctl_allocate_iso_resource(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + + return init_iso_resource(client, request, ISO_RES_ALLOC); +} + +static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) +{ + struct fw_cdev_deallocate *request = buffer; + + return release_client_resource(client, request->handle, + release_iso_resource, NULL); +} + +static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + + return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); +} + +static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) +{ + struct fw_cdev_allocate_iso_resource *request = buffer; + + return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); +} + +/* + * Returns a speed code: Maximum speed to or from this device, + * limited by the device's link speed, the local node's link speed, + * and all PHY port speeds between the two links. + */ +static int ioctl_get_speed(struct client *client, void *buffer) +{ + return client->device->max_speed; +} + +static int ioctl_send_broadcast_request(struct client *client, void *buffer) +{ + struct fw_cdev_send_request *request = buffer; + + switch (request->tcode) { + case TCODE_WRITE_QUADLET_REQUEST: + case TCODE_WRITE_BLOCK_REQUEST: + break; + default: + return -EINVAL; + } + + /* Security policy: Only allow accesses to Units Space. 
*/ + if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) + return -EACCES; + + return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); +} + +static int ioctl_send_stream_packet(struct client *client, void *buffer) +{ + struct fw_cdev_send_stream_packet *p = buffer; + struct fw_cdev_send_request request; + int dest; + + if (p->speed > client->device->card->link_speed || + p->length > 1024 << p->speed) + return -EIO; + + if (p->tag > 3 || p->channel > 63 || p->sy > 15) + return -EINVAL; + + dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); + request.tcode = TCODE_STREAM_DATA; + request.length = p->length; + request.closure = p->closure; + request.data = p->data; + request.generation = p->generation; + + return init_request(client, &request, dest, p->speed); +} + static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_info, ioctl_send_request, @@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_start_iso, ioctl_stop_iso, ioctl_get_cycle_timer, + ioctl_allocate_iso_resource, + ioctl_deallocate_iso_resource, + ioctl_allocate_iso_resource_once, + ioctl_deallocate_iso_resource_once, + ioctl_get_speed, + ioctl_send_broadcast_request, + ioctl_send_stream_packet, }; -static int -dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) +static int dispatch_ioctl(struct client *client, + unsigned int cmd, void __user *arg) { char buffer[256]; - int retval; + int ret; if (_IOC_TYPE(cmd) != '#' || _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) @@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) return -EFAULT; } - retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); - if (retval < 0) - return retval; + ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); + if (ret < 0) + return ret; if (_IOC_DIR(cmd) & _IOC_READ) { if (_IOC_SIZE(cmd) > sizeof(buffer) || @@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) return -EFAULT; } - return retval; + return ret; } -static long -fw_device_op_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) +static long fw_device_op_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) { struct client *client = file->private_data; @@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file, } #ifdef CONFIG_COMPAT -static long -fw_device_op_compat_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) +static long fw_device_op_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) { struct client *client = file->private_data; @@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) struct client *client = file->private_data; enum dma_data_direction direction; unsigned long size; - int page_count, retval; + int page_count, ret; if (fw_device_is_shutdown(client->device)) return -ENODEV; @@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma) else direction = DMA_FROM_DEVICE; - retval = fw_iso_buffer_init(&client->buffer, client->device->card, - page_count, direction); - if (retval < 0) - return retval; + ret = fw_iso_buffer_init(&client->buffer, client->device->card, + page_count, direction); + if (ret < 0) + return ret; - retval = fw_iso_buffer_map(&client->buffer, vma); - if (retval < 0) + ret = fw_iso_buffer_map(&client->buffer, vma); + if (ret < 0) fw_iso_buffer_destroy(&client->buffer, client->device->card); - return retval; 
+ return ret; +} + +static int shutdown_resource(int id, void *p, void *data) +{ + struct client_resource *r = p; + struct client *client = data; + + r->release(client, r); + client_put(client); + + return 0; } static int fw_device_op_release(struct inode *inode, struct file *file) { struct client *client = file->private_data; struct event *e, *next_e; - struct client_resource *r, *next_r; - unsigned long flags; - if (client->buffer.pages) - fw_iso_buffer_destroy(&client->buffer, client->device->card); + mutex_lock(&client->device->client_list_mutex); + list_del(&client->link); + mutex_unlock(&client->device->client_list_mutex); if (client->iso_context) fw_iso_context_destroy(client->iso_context); - list_for_each_entry_safe(r, next_r, &client->resource_list, link) - r->release(client, r); + if (client->buffer.pages) + fw_iso_buffer_destroy(&client->buffer, client->device->card); - /* - * FIXME: We should wait for the async tasklets to stop - * running before freeing the memory. - */ + /* Freeze client->resource_idr and client->event_list */ + spin_lock_irq(&client->lock); + client->in_shutdown = true; + spin_unlock_irq(&client->lock); + + idr_for_each(&client->resource_idr, shutdown_resource, client); + idr_remove_all(&client->resource_idr); + idr_destroy(&client->resource_idr); list_for_each_entry_safe(e, next_e, &client->event_list, link) kfree(e); - spin_lock_irqsave(&client->device->card->lock, flags); - list_del(&client->link); - spin_unlock_irqrestore(&client->device->card->lock, flags); - - fw_device_put(client->device); - kfree(client); + client_put(client); return 0; } diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index bf53acb..a47e212 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c @@ -18,22 +18,26 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#include <linux/module.h> -#include <linux/wait.h> -#include <linux/errno.h> -#include <linux/kthread.h> -#include <linux/device.h> +#include <linux/ctype.h> #include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> #include <linux/idr.h> #include <linux/jiffies.h> -#include <linux/string.h> +#include <linux/kobject.h> +#include <linux/list.h> +#include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/semaphore.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/workqueue.h> + #include <asm/system.h> -#include <linux/ctype.h> -#include "fw-transaction.h" -#include "fw-topology.h" + #include "fw-device.h" +#include "fw-topology.h" +#include "fw-transaction.h" void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) { @@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) vendor, model, specifier_id, version); } -static int -fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) +static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env) { struct fw_unit *unit = fw_unit(dev); char modalias[64]; @@ -152,27 +155,6 @@ struct bus_type fw_bus_type = { }; EXPORT_SYMBOL(fw_bus_type); -static void fw_device_release(struct device *dev) -{ - struct fw_device *device = fw_device(dev); - struct fw_card *card = device->card; - unsigned long flags; - - /* - * Take the card lock so we don't set this to NULL while a - * FW_NODE_UPDATED callback is being handled or while the - * bus manager work looks at this node. 
- */ - spin_lock_irqsave(&card->lock, flags); - device->node->data = NULL; - spin_unlock_irqrestore(&card->lock, flags); - - fw_node_put(device->node); - kfree(device->config_rom); - kfree(device); - fw_card_put(card); -} - int fw_device_enable_phys_dma(struct fw_device *device) { int generation = device->generation; @@ -191,8 +173,8 @@ struct config_rom_attribute { u32 key; }; -static ssize_t -show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) +static ssize_t show_immediate(struct device *dev, + struct device_attribute *dattr, char *buf) { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); @@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) #define IMMEDIATE_ATTR(name, key) \ { __ATTR(name, S_IRUGO, show_immediate, NULL), key } -static ssize_t -show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) +static ssize_t show_text_leaf(struct device *dev, + struct device_attribute *dattr, char *buf) { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); @@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = { TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), }; -static void -init_fw_attribute_group(struct device *dev, - struct device_attribute *attrs, - struct fw_attribute_group *group) +static void init_fw_attribute_group(struct device *dev, + struct device_attribute *attrs, + struct fw_attribute_group *group) { struct device_attribute *attr; int i, j; @@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev, dev->groups = group->groups; } -static ssize_t -modalias_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_unit *unit = fw_unit(dev); int length; @@ -332,9 +312,8 @@ modalias_show(struct device *dev, return length + 1; } -static ssize_t -rom_index_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rom_index_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev->parent); struct fw_unit *unit = fw_unit(dev); @@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = { __ATTR_NULL, }; -static ssize_t -config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t config_rom_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); size_t length; @@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) return length; } -static ssize_t -guid_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t guid_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct fw_device *device = fw_device(dev); int ret; @@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = { __ATTR_NULL, }; -static int -read_rom(struct fw_device *device, int generation, int index, u32 *data) +static int read_rom(struct fw_device *device, + int generation, int index, u32 *data) { int rcode; @@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation) kfree(old_rom); ret = 0; - device->cmc = rom[2] & 1 << 30; + device->cmc = rom[2] >> 30 & 1; out: kfree(rom); @@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work) fw_device_put(device); } 
+static void fw_device_release(struct device *dev) +{ + struct fw_device *device = fw_device(dev); + struct fw_card *card = device->card; + unsigned long flags; + + /* + * Take the card lock so we don't set this to NULL while a + * FW_NODE_UPDATED callback is being handled or while the + * bus manager work looks at this node. + */ + spin_lock_irqsave(&card->lock, flags); + device->node->data = NULL; + spin_unlock_irqrestore(&card->lock, flags); + + fw_node_put(device->node); + kfree(device->config_rom); + kfree(device); + fw_card_put(card); +} + static struct device_type fw_device_type = { - .release = fw_device_release, + .release = fw_device_release, }; -static void fw_device_update(struct work_struct *work); +static int update_unit(struct device *dev, void *data) +{ + struct fw_unit *unit = fw_unit(dev); + struct fw_driver *driver = (struct fw_driver *)dev->driver; + + if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { + down(&dev->sem); + driver->update(unit); + up(&dev->sem); + } + + return 0; +} + +static void fw_device_update(struct work_struct *work) +{ + struct fw_device *device = + container_of(work, struct fw_device, work.work); + + fw_device_cdev_update(device); + device_for_each_child(&device->device, NULL, update_unit); +} /* * If a device was pending for deletion because its node went away but its @@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data) return match; } +enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; + +void fw_device_set_broadcast_channel(struct fw_device *device, int generation) +{ + struct fw_card *card = device->card; + __be32 data; + int rcode; + + if (!card->broadcast_channel_allocated) + return; + + if (device->bc_implemented == BC_UNKNOWN) { + rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, + device->node_id, generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + &data, 4); + switch (rcode) { + case RCODE_COMPLETE: + if (data & cpu_to_be32(1 << 31)) { + device->bc_implemented = BC_IMPLEMENTED; + break; + } + /* else fall through to case address error */ + case RCODE_ADDRESS_ERROR: + device->bc_implemented = BC_UNIMPLEMENTED; + } + } + + if (device->bc_implemented == BC_IMPLEMENTED) { + data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL | + BROADCAST_CHANNEL_VALID); + fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, + device->node_id, generation, device->max_speed, + CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL, + &data, 4); + } +} + static void fw_device_init(struct work_struct *work) { struct fw_device *device = container_of(work, struct fw_device, work.work); struct device *revived_dev; - int minor, err; + int minor, ret; /* * All failure paths here set node->data to NULL, so that we @@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work) fw_device_get(device); down_write(&fw_device_rwsem); - err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? + ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? 
idr_get_new(&fw_device_idr, device, &minor) : -ENOMEM; up_write(&fw_device_rwsem); - if (err < 0) + if (ret < 0) goto error; device->device.bus = &fw_bus_type; @@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work) device->config_rom[3], device->config_rom[4], 1 << device->max_speed); device->config_rom_retries = 0; + + fw_device_set_broadcast_channel(device, device->generation); } /* @@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work) put_device(&device->device); /* our reference */ } -static int update_unit(struct device *dev, void *data) -{ - struct fw_unit *unit = fw_unit(dev); - struct fw_driver *driver = (struct fw_driver *)dev->driver; - - if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) { - down(&dev->sem); - driver->update(unit); - up(&dev->sem); - } - - return 0; -} - -static void fw_device_update(struct work_struct *work) -{ - struct fw_device *device = - container_of(work, struct fw_device, work.work); - - fw_device_cdev_update(device); - device_for_each_child(&device->device, NULL, update_unit); -} - enum { REREAD_BIB_ERROR, REREAD_BIB_GONE, @@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation) if (i == 0 && q == 0) return REREAD_BIB_GONE; - if (i > device->config_rom_length || q != device->config_rom[i]) + if (q != device->config_rom[i]) return REREAD_BIB_CHANGED; } @@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) device->node = fw_node_get(node); device->node_id = node->node_id; device->generation = card->generation; + mutex_init(&device->client_list_mutex); INIT_LIST_HEAD(&device->client_list); /* diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 8ef6ec2..9758893 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h @@ -19,10 +19,17 @@ #ifndef __fw_device_h #define __fw_device_h +#include <linux/device.h> #include <linux/fs.h> -#include <linux/cdev.h> #include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> #include <linux/rwsem.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/workqueue.h> + #include <asm/atomic.h> enum fw_device_state { @@ -38,6 +45,9 @@ struct fw_attribute_group { struct attribute *attrs[11]; }; +struct fw_node; +struct fw_card; + /* * Note, fw_device.generation always has to be read before fw_device.node_id. * Use SMP memory barriers to ensure this. 
Otherwise requests will be sent @@ -61,13 +71,18 @@ struct fw_device { int node_id; int generation; unsigned max_speed; - bool cmc; struct fw_card *card; struct device device; + + struct mutex client_list_mutex; struct list_head client_list; + u32 *config_rom; size_t config_rom_length; int config_rom_retries; + unsigned cmc:1; + unsigned bc_implemented:2; + struct delayed_work work; struct fw_attribute_group attribute_group; }; @@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device) struct fw_device *fw_device_get_by_devt(dev_t devt); int fw_device_enable_phys_dma(struct fw_device *device); +void fw_device_set_broadcast_channel(struct fw_device *device, int generation); void fw_device_cdev_update(struct fw_device *device); void fw_device_cdev_remove(struct fw_device *device); @@ -176,8 +192,7 @@ struct fw_driver { const struct fw_device_id *id_table; }; -static inline struct fw_driver * -fw_driver(struct device_driver *drv) +static inline struct fw_driver *fw_driver(struct device_driver *drv) { return container_of(drv, struct fw_driver, driver); } diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c index e14c03d..2baf100 100644 --- a/drivers/firewire/fw-iso.c +++ b/drivers/firewire/fw-iso.c @@ -1,5 +1,7 @@ /* - * Isochronous IO functionality + * Isochronous I/O functionality: + * - Isochronous DMA context management + * - Isochronous bus resource management (channels, bandwidth), client side * * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> * @@ -18,21 +20,25 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#include <linux/kernel.h> -#include <linux/module.h> #include <linux/dma-mapping.h> -#include <linux/vmalloc.h> +#include <linux/errno.h> +#include <linux/firewire-constants.h> +#include <linux/kernel.h> #include <linux/mm.h> +#include <linux/spinlock.h> +#include <linux/vmalloc.h> -#include "fw-transaction.h" #include "fw-topology.h" -#include "fw-device.h" +#include "fw-transaction.h" -int -fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, - int page_count, enum dma_data_direction direction) +/* + * Isochronous DMA context management + */ + +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction) { - int i, j, retval = -ENOMEM; + int i, j; dma_addr_t address; buffer->page_count = page_count; @@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, kfree(buffer->pages); out: buffer->pages = NULL; - return retval; + + return -ENOMEM; } int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) { unsigned long uaddr; - int i, retval; + int i, err; uaddr = vma->vm_start; for (i = 0; i < buffer->page_count; i++) { - retval = vm_insert_page(vma, uaddr, buffer->pages[i]); - if (retval) - return retval; + err = vm_insert_page(vma, uaddr, buffer->pages[i]); + if (err) + return err; + uaddr += PAGE_SIZE; } @@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, buffer->pages = NULL; } -struct fw_iso_context * -fw_iso_context_create(struct fw_card *card, int type, - int channel, int speed, size_t header_size, - fw_iso_callback_t callback, void *callback_data) +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data) { struct fw_iso_context *ctx; - ctx = card->driver->allocate_iso_context(card, type, header_size); + ctx = 
card->driver->allocate_iso_context(card, + type, channel, header_size); if (IS_ERR(ctx)) return ctx; @@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx) card->driver->free_iso_context(ctx); } -int -fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags) { return ctx->card->driver->start_iso(ctx, cycle, sync, tags); } -int -fw_iso_context_queue(struct fw_iso_context *ctx, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct fw_card *card = ctx->card; return card->driver->queue_iso(ctx, packet, buffer, payload); } -int -fw_iso_context_stop(struct fw_iso_context *ctx) +int fw_iso_context_stop(struct fw_iso_context *ctx) { return ctx->card->driver->stop_iso(ctx); } + +/* + * Isochronous bus resource management (channels, bandwidth), client side + */ + +static int manage_bandwidth(struct fw_card *card, int irm_id, int generation, + int bandwidth, bool allocate) +{ + __be32 data[2]; + int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0; + + /* + * On a 1394a IRM with low contention, try < 1 is enough. + * On a 1394-1995 IRM, we need at least try < 2. + * Let's just do try < 5. + */ + for (try = 0; try < 5; try++) { + new = allocate ? old - bandwidth : old + bandwidth; + if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL) + break; + + data[0] = cpu_to_be32(old); + data[1] = cpu_to_be32(new); + switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, + data, sizeof(data))) { + case RCODE_GENERATION: + /* A generation change frees all bandwidth. */ + return allocate ? -EAGAIN : bandwidth; + + case RCODE_COMPLETE: + if (be32_to_cpup(data) == old) + return bandwidth; + + old = be32_to_cpup(data); + /* Fall through. */ + } + } + + return -EIO; +} + +static int manage_channel(struct fw_card *card, int irm_id, int generation, + u32 channels_mask, u64 offset, bool allocate) +{ + __be32 data[2], c, all, old; + int i, retry = 5; + + old = all = allocate ? cpu_to_be32(~0) : 0; + + for (i = 0; i < 32; i++) { + if (!(channels_mask & 1 << i)) + continue; + + c = cpu_to_be32(1 << (31 - i)); + if ((old & c) != (all & c)) + continue; + + data[0] = old; + data[1] = old ^ c; + switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, + irm_id, generation, SCODE_100, + offset, data, sizeof(data))) { + case RCODE_GENERATION: + /* A generation change frees all channels. */ + return allocate ? -EAGAIN : i; + + case RCODE_COMPLETE: + if (data[0] == old) + return i; + + old = data[0]; + + /* Is the IRM 1394a-2000 compliant? */ + if ((data[0] & c) == (data[1] & c)) + continue; + + /* 1394-1995 IRM, fall through to retry. */ + default: + if (retry--) + i--; + } + } + + return -EIO; +} + +static void deallocate_channel(struct fw_card *card, int irm_id, + int generation, int channel) +{ + u32 mask; + u64 offset; + + mask = channel < 32 ? 1 << channel : 1 << (channel - 32); + offset = channel < 32 ? 
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI : + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO; + + manage_channel(card, irm_id, generation, mask, offset, false); +} + +/** + * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth + * + * In parameters: card, generation, channels_mask, bandwidth, allocate + * Out parameters: channel, bandwidth + * This function blocks (sleeps) during communication with the IRM. + * + * Allocates or deallocates at most one channel out of channels_mask. + * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0. + * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for + * channel 0 and LSB for channel 63.) + * Allocates or deallocates as many bandwidth allocation units as specified. + * + * Returns channel < 0 if no channel was allocated or deallocated. + * Returns bandwidth = 0 if no bandwidth was allocated or deallocated. + * + * If generation is stale, deallocations succeed but allocations fail with + * channel = -EAGAIN. + * + * If channel allocation fails, no bandwidth will be allocated either. + * If bandwidth allocation fails, no channel will be allocated either. + * But deallocations of channel and bandwidth are tried independently + * of each other's success. + */ +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, + bool allocate) +{ + u32 channels_hi = channels_mask; /* channels 31...0 */ + u32 channels_lo = channels_mask >> 32; /* channels 63...32 */ + int irm_id, ret, c = -EINVAL; + + spin_lock_irq(&card->lock); + irm_id = card->irm_node->node_id; + spin_unlock_irq(&card->lock); + + if (channels_hi) + c = manage_channel(card, irm_id, generation, channels_hi, + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate); + if (channels_lo && c < 0) { + c = manage_channel(card, irm_id, generation, channels_lo, + CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate); + if (c >= 0) + c += 32; + } + *channel = c; + + if (allocate && channels_mask != 0 && c < 0) + *bandwidth = 0; + + if (*bandwidth == 0) + return; + + ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate); + if (ret < 0) + *bandwidth = 0; + + if (allocate && ret < 0 && c >= 0) { + deallocate_channel(card, irm_id, generation, c); + *channel = ret; + } +} diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 6d19828..1180d0b 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c @@ -205,6 +205,7 @@ struct fw_ohci { u32 it_context_mask; struct iso_context *it_context_list; + u64 ir_context_channels; u32 ir_context_mask; struct iso_context *ir_context_list; }; @@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci) reg_read(ohci, OHCI1394_Version); } -static int -ohci_update_phy_reg(struct fw_card *card, int addr, - int clear_bits, int set_bits) +static int ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) { struct fw_ohci *ohci = fw_ohci(card); u32 val, old; @@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data) } } -static int -ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) +static int ar_context_init(struct ar_context *ctx, + struct fw_ohci *ohci, u32 regs) { struct ar_buffer ab; @@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx) flush_writes(ctx->ohci); } -static struct descriptor * -find_branch_descriptor(struct descriptor *d, int z) +static struct descriptor 
*find_branch_descriptor(struct descriptor *d, int z) { int b, key; @@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data) * Allocate a new buffer and add it to the list of free buffers for this * context. Must be called with ohci->lock held. */ -static int -context_add_buffer(struct context *ctx) +static int context_add_buffer(struct context *ctx) { struct descriptor_buffer *desc; dma_addr_t uninitialized_var(bus_addr); @@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx) return 0; } -static int -context_init(struct context *ctx, struct fw_ohci *ohci, - u32 regs, descriptor_callback_t callback) +static int context_init(struct context *ctx, struct fw_ohci *ohci, + u32 regs, descriptor_callback_t callback) { ctx->ohci = ohci; ctx->regs = regs; @@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci, return 0; } -static void -context_release(struct context *ctx) +static void context_release(struct context *ctx) { struct fw_card *card = &ctx->ohci->card; struct descriptor_buffer *desc, *tmp; @@ -827,8 +823,8 @@ context_release(struct context *ctx) } /* Must be called with ohci->lock held */ -static struct descriptor * -context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) +static struct descriptor *context_get_descriptors(struct context *ctx, + int z, dma_addr_t *d_bus) { struct descriptor *d = NULL; struct descriptor_buffer *desc = ctx->buffer_tail; @@ -912,8 +908,8 @@ struct driver_data { * Must always be called with the ochi->lock held to ensure proper * generation handling and locking around packet queue manipulation. */ -static int -at_context_queue_packet(struct context *ctx, struct fw_packet *packet) +static int at_context_queue_packet(struct context *ctx, + struct fw_packet *packet) { struct fw_ohci *ohci = ctx->ohci; dma_addr_t d_bus, uninitialized_var(payload_bus); @@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) */ header = (__le32 *) &d[1]; - if (packet->header_length > 8) { + switch (packet->header_length) { + case 16: + case 12: header[0] = cpu_to_le32((packet->header[0] & 0xffff) | (packet->speed << 16)); header[1] = cpu_to_le32((packet->header[1] & 0xffff) | @@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet) header[3] = (__force __le32) packet->header[3]; d[0].req_count = cpu_to_le16(packet->header_length); - } else { + break; + + case 8: header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | (packet->speed << 16)); header[1] = cpu_to_le32(packet->header[0]); header[2] = cpu_to_le32(packet->header[1]); d[0].req_count = cpu_to_le16(12); + break; + + case 4: + header[0] = cpu_to_le32((packet->header[0] & 0xffff) | + (packet->speed << 16)); + header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); + d[0].req_count = cpu_to_le16(8); + break; + + default: + /* BUG(); */ + packet->ack = RCODE_SEND_ERROR; + return -1; } driver_data = (struct driver_data *) &d[3]; @@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context, #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) -static void -handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) +static void handle_local_rom(struct fw_ohci *ohci, + struct fw_packet *packet, u32 csr) { struct fw_packet response; int tcode, length, i; @@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) fw_core_handle_response(&ohci->card, &response); } -static void 
-handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) +static void handle_local_lock(struct fw_ohci *ohci, + struct fw_packet *packet, u32 csr) { struct fw_packet response; int tcode, length, ext_tcode, sel; @@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) fw_core_handle_response(&ohci->card, &response); } -static void -handle_local_request(struct context *ctx, struct fw_packet *packet) +static void handle_local_request(struct context *ctx, struct fw_packet *packet) { u64 offset; u32 csr; @@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet) } } -static void -at_context_transmit(struct context *ctx, struct fw_packet *packet) +static void at_context_transmit(struct context *ctx, struct fw_packet *packet) { unsigned long flags; - int retval; + int ret; spin_lock_irqsave(&ctx->ohci->lock, flags); @@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet) return; } - retval = at_context_queue_packet(ctx, packet); + ret = at_context_queue_packet(ctx, packet); spin_unlock_irqrestore(&ctx->ohci->lock, flags); - if (retval < 0) + if (ret < 0) packet->callback(packet, &ctx->ohci->card, packet->ack); } @@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length) return 0; } -static int -ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) +static int ohci_set_config_rom(struct fw_card *card, + u32 *config_rom, size_t length) { struct fw_ohci *ohci; unsigned long flags; - int retval = -EBUSY; + int ret = -EBUSY; __be32 *next_config_rom; dma_addr_t uninitialized_var(next_config_rom_bus); @@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); - retval = 0; + ret = 0; } spin_unlock_irqrestore(&ohci->lock, flags); @@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) * controller could need to access it before the bus reset * takes effect. 
*/ - if (retval == 0) + if (ret == 0) fw_core_initiate_bus_reset(&ohci->card, 1); else dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, next_config_rom_bus); - return retval; + return ret; } static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) @@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) struct fw_ohci *ohci = fw_ohci(card); struct context *ctx = &ohci->at_request_ctx; struct driver_data *driver_data = packet->driver_data; - int retval = -ENOENT; + int ret = -ENOENT; tasklet_disable(&ctx->tasklet); @@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) driver_data->packet = NULL; packet->ack = RCODE_CANCELLED; packet->callback(packet, &ohci->card, packet->ack); - retval = 0; - + ret = 0; out: tasklet_enable(&ctx->tasklet); - return retval; + return ret; } -static int -ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) +static int ohci_enable_phys_dma(struct fw_card *card, + int node_id, int generation) { #ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA return 0; #else struct fw_ohci *ohci = fw_ohci(card); unsigned long flags; - int n, retval = 0; + int n, ret = 0; /* * FIXME: Make sure this bitmask is cleared when we clear the busReset @@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) spin_lock_irqsave(&ohci->lock, flags); if (ohci->generation != generation) { - retval = -ESTALE; + ret = -ESTALE; goto out; } @@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) flush_writes(ohci); out: spin_unlock_irqrestore(&ohci->lock, flags); - return retval; + + return ret; #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ } -static u64 -ohci_get_bus_time(struct fw_card *card) +static u64 ohci_get_bus_time(struct fw_card *card) { struct fw_ohci *ohci = fw_ohci(card); u32 cycle_time; @@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card) return bus_time; } +static void copy_iso_headers(struct iso_context *ctx, void *p) +{ + int i = ctx->header_length; + + if (i + ctx->base.header_size > PAGE_SIZE) + return; + + /* + * The iso header is byteswapped to little endian by + * the controller, but the remaining header quadlets + * are big endian. We want to present all the headers + * as big endian, so we have to swap the first quadlet. 
+ */ + if (ctx->base.header_size > 0) + *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); + if (ctx->base.header_size > 4) + *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p); + if (ctx->base.header_size > 8) + memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8); + ctx->header_length += ctx->base.header_size; +} + static int handle_ir_dualbuffer_packet(struct context *context, struct descriptor *d, struct descriptor *last) @@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context, __le32 *ir_header; size_t header_length; void *p, *end; - int i; if (db->first_res_count != 0 && db->second_res_count != 0) { if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { @@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context, header_length = le16_to_cpu(db->first_req_count) - le16_to_cpu(db->first_res_count); - i = ctx->header_length; p = db + 1; end = p + header_length; - while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { - /* - * The iso header is byteswapped to little endian by - * the controller, but the remaining header quadlets - * are big endian. We want to present all the headers - * as big endian, so we have to swap the first - * quadlet. - */ - *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); - memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); - i += ctx->base.header_size; + while (p < end) { + copy_iso_headers(ctx, p); ctx->excess_bytes += (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; - p += ctx->base.header_size + 4; + p += max(ctx->base.header_size, (size_t)8); } - ctx->header_length = i; ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - le16_to_cpu(db->second_res_count); @@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context, struct descriptor *pd; __le32 *ir_header; void *p; - int i; for (pd = d; pd <= last; pd++) { if (pd->transfer_status) @@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context, /* Descriptor(s) not done yet, stop iteration */ return 0; - i = ctx->header_length; - p = last + 1; - - if (ctx->base.header_size > 0 && - i + ctx->base.header_size <= PAGE_SIZE) { - /* - * The iso header is byteswapped to little endian by - * the controller, but the remaining header quadlets - * are big endian. We want to present all the headers - * as big endian, so we have to swap the first quadlet. 
- */ - *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4)); - memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); - ctx->header_length += ctx->base.header_size; - } + p = last + 1; + copy_iso_headers(ctx, p); if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { ir_header = (__le32 *) p; @@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context, return 1; } -static struct fw_iso_context * -ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) +static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, + int type, int channel, size_t header_size) { struct fw_ohci *ohci = fw_ohci(card); struct iso_context *ctx, *list; descriptor_callback_t callback; + u64 *channels, dont_care = ~0ULL; u32 *mask, regs; unsigned long flags; - int index, retval = -ENOMEM; + int index, ret = -ENOMEM; if (type == FW_ISO_CONTEXT_TRANSMIT) { + channels = &dont_care; mask = &ohci->it_context_mask; list = ohci->it_context_list; callback = handle_it_packet; } else { + channels = &ohci->ir_context_channels; mask = &ohci->ir_context_mask; list = ohci->ir_context_list; if (ohci->use_dualbuffer) @@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) } spin_lock_irqsave(&ohci->lock, flags); - index = ffs(*mask) - 1; - if (index >= 0) + index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; + if (index >= 0) { + *channels &= ~(1ULL << channel); *mask &= ~(1 << index); + } spin_unlock_irqrestore(&ohci->lock, flags); if (index < 0) @@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) if (ctx->header == NULL) goto out; - retval = context_init(&ctx->context, ohci, regs, callback); - if (retval < 0) + ret = context_init(&ctx->context, ohci, regs, callback); + if (ret < 0) goto out_with_header; return &ctx->base; @@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) *mask |= 1 << index; spin_unlock_irqrestore(&ohci->lock, flags); - return ERR_PTR(retval); + return ERR_PTR(ret); } static int ohci_start_iso(struct fw_iso_context *base, @@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base) } else { index = ctx - ohci->ir_context_list; ohci->ir_context_mask |= 1 << index; + ohci->ir_context_channels |= 1ULL << base->channel; } spin_unlock_irqrestore(&ohci->lock, flags); } -static int -ohci_queue_iso_transmit(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso_transmit(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); struct descriptor *d, *last, *pd; @@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base, return 0; } -static int -ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); struct db_descriptor *db = NULL; @@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, z = 2; /* - * The OHCI controller puts the status word in the header - * buffer too, so we need 4 extra 
bytes per packet. + * The OHCI controller puts the isochronous header and trailer in the + * buffer, so we need at least 8 bytes. */ packet_count = p->header_length / ctx->base.header_size; - header_size = packet_count * (ctx->base.header_size + 4); + header_size = packet_count * max(ctx->base.header_size, (size_t)8); /* Get header size in number of descriptors. */ header_z = DIV_ROUND_UP(header_size, sizeof(*d)); @@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, db = (struct db_descriptor *) d; db->control = cpu_to_le16(DESCRIPTOR_STATUS | DESCRIPTOR_BRANCH_ALWAYS); - db->first_size = cpu_to_le16(ctx->base.header_size + 4); + db->first_size = + cpu_to_le16(max(ctx->base.header_size, (size_t)8)); if (p->skip && rest == p->payload_length) { db->control |= cpu_to_le16(DESCRIPTOR_WAIT); db->first_req_count = db->first_size; @@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, return 0; } -static int -ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); struct descriptor *d = NULL, *pd = NULL; @@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, int page, offset, packet_count, header_size, payload_per_buffer; /* - * The OHCI controller puts the status word in the - * buffer too, so we need 4 extra bytes per packet. + * The OHCI controller puts the isochronous header and trailer in the + * buffer, so we need at least 8 bytes. */ packet_count = p->header_length / ctx->base.header_size; - header_size = ctx->base.header_size + 4; + header_size = max(ctx->base.header_size, (size_t)8); /* Get header size in number of descriptors. 
*/ header_z = DIV_ROUND_UP(header_size, sizeof(*d)); @@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, return 0; } -static int -ohci_queue_iso(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) +static int ohci_queue_iso(struct fw_iso_context *base, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload) { struct iso_context *ctx = container_of(base, struct iso_context, base); unsigned long flags; - int retval; + int ret; spin_lock_irqsave(&ctx->context.ohci->lock, flags); if (base->type == FW_ISO_CONTEXT_TRANSMIT) - retval = ohci_queue_iso_transmit(base, packet, buffer, payload); + ret = ohci_queue_iso_transmit(base, packet, buffer, payload); else if (ctx->context.ohci->use_dualbuffer) - retval = ohci_queue_iso_receive_dualbuffer(base, packet, - buffer, payload); + ret = ohci_queue_iso_receive_dualbuffer(base, packet, + buffer, payload); else - retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, - buffer, - payload); + ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, + buffer, payload); spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); - return retval; + return ret; } static const struct fw_card_driver ohci_driver = { @@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev) #define ohci_pmac_off(dev) #endif /* CONFIG_PPC_PMAC */ -static int __devinit -pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) +static int __devinit pci_probe(struct pci_dev *dev, + const struct pci_device_id *ent) { struct fw_ohci *ohci; u32 bus_options, max_receive, link_speed, version; @@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) ohci->it_context_list = kzalloc(size, GFP_KERNEL); reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); + ohci->ir_context_channels = ~0ULL; ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); @@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) reg_read(ohci, OHCI1394_GUIDLo); err = fw_card_add(&ohci->card, max_receive, link_speed, guid); - if (err < 0) + if (err) goto fail_self_id; fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", dev_name(&dev->dev), version >> 16, version & 0xff); + return 0; fail_self_id: diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index c71c441..2bcf515 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c @@ -392,20 +392,18 @@ static const struct { } }; -static void -free_orb(struct kref *kref) +static void free_orb(struct kref *kref) { struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); kfree(orb); } -static void -sbp2_status_write(struct fw_card *card, struct fw_request *request, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void sbp2_status_write(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *payload, size_t length, void *callback_data) { struct sbp2_logical_unit *lu = callback_data; struct sbp2_orb *orb; @@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request, fw_send_response(card, request, RCODE_COMPLETE); } -static void 
-complete_transaction(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) +static void complete_transaction(struct fw_card *card, int rcode, + void *payload, size_t length, void *data) { struct sbp2_orb *orb = data; unsigned long flags; @@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode, kref_put(&orb->kref, free_orb); } -static void -sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, - int node_id, int generation, u64 offset) +static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, + int node_id, int generation, u64 offset) { struct fw_device *device = fw_device(lu->tgt->unit->device.parent); unsigned long flags; @@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) return retval; } -static void -complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) +static void complete_management_orb(struct sbp2_orb *base_orb, + struct sbp2_status *status) { struct sbp2_management_orb *orb = container_of(base_orb, struct sbp2_management_orb, base); @@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) complete(&orb->done); } -static int -sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, - int generation, int function, int lun_or_login_id, - void *response) +static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, + int generation, int function, + int lun_or_login_id, void *response) { struct fw_device *device = fw_device(lu->tgt->unit->device.parent); struct sbp2_management_orb *orb; @@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu) &d, sizeof(d)); } -static void -complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, - void *payload, size_t length, void *data) +static void complete_agent_reset_write_no_wait(struct fw_card *card, + int rcode, void *payload, size_t length, void *data) { kfree(data); } @@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device, sizeof(orb->page_table), DMA_TO_DEVICE); } -static unsigned int -sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) +static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) { int sam_status; @@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data) } } -static void -complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) +static void complete_command_orb(struct sbp2_orb *base_orb, + struct sbp2_status *status) { struct sbp2_command_orb *orb = container_of(base_orb, struct sbp2_command_orb, base); @@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) orb->done(orb->cmd); } -static int -sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, - struct sbp2_logical_unit *lu) +static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, + struct fw_device *device, struct sbp2_logical_unit *lu) { struct scatterlist *sg = scsi_sglist(orb->cmd); int i, n; @@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) * This is the concatenation of target port identifier and logical unit * identifier as per SAM-2...SAM-4 annex A. 
*/ -static ssize_t -sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); struct sbp2_logical_unit *lu; diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 8dd6703..d0deecc 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c @@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card, struct fw_node * node, struct fw_node * parent); -static void -for_each_fw_node(struct fw_card *card, struct fw_node *root, - fw_node_callback_t callback) +static void for_each_fw_node(struct fw_card *card, struct fw_node *root, + fw_node_callback_t callback) { struct list_head list; struct fw_node *node, *next, *child, *parent; @@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root, fw_node_put(node); } -static void -report_lost_node(struct fw_card *card, - struct fw_node *node, struct fw_node *parent) +static void report_lost_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) { fw_node_event(card, node, FW_NODE_DESTROYED); fw_node_put(node); @@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card, card->bm_retries = 0; } -static void -report_found_node(struct fw_card *card, - struct fw_node *node, struct fw_node *parent) +static void report_found_node(struct fw_card *card, + struct fw_node *node, struct fw_node *parent) { int b_path = (node->phy_speed == SCODE_BETA); @@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) * found, lost or updated. Update the nodes in the card topology tree * as we go. */ -static void -update_tree(struct fw_card *card, struct fw_node *root) +static void update_tree(struct fw_card *card, struct fw_node *root) { struct list_head list0, list1; struct fw_node *node0, *node1, *next1; @@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root) } } -static void -update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) +static void update_topology_map(struct fw_card *card, + u32 *self_ids, int self_id_count) { int node_count; @@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) fw_compute_block_crc(card->topology_map); } -void -fw_core_handle_bus_reset(struct fw_card *card, - int node_id, int generation, - int self_id_count, u32 * self_ids) +void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, + int self_id_count, u32 *self_ids) { struct fw_node *local_node; unsigned long flags; @@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card, spin_lock_irqsave(&card->lock, flags); + card->broadcast_channel_allocated = false; card->node_id = node_id; /* * Update node_id before generation to prevent anybody from using diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h index addb9f8..3c497bb 100644 --- a/drivers/firewire/fw-topology.h +++ b/drivers/firewire/fw-topology.h @@ -19,6 +19,11 @@ #ifndef __fw_topology_h #define __fw_topology_h +#include <linux/list.h> +#include <linux/slab.h> + +#include <asm/atomic.h> + enum { FW_NODE_CREATED, FW_NODE_UPDATED, @@ -51,26 +56,22 @@ struct fw_node { struct fw_node *ports[0]; }; -static inline struct fw_node * -fw_node_get(struct fw_node *node) +static inline struct fw_node *fw_node_get(struct fw_node *node) { atomic_inc(&node->ref_count); return node; } 
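The fw_node_get()/fw_node_put() pair in this fw-topology.h hunk is the usual atomic reference count where the final put frees the node. A self-contained C11 analogue of that pattern, for illustration only (the names and the atomic_fetch_* calls are userspace stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	atomic_int ref_count;
};

static struct node *node_get(struct node *n)
{
	atomic_fetch_add(&n->ref_count, 1);
	return n;
}

static void node_put(struct node *n)
{
	/* like atomic_dec_and_test(): previous value 1 means we hit zero */
	if (atomic_fetch_sub(&n->ref_count, 1) == 1) {
		free(n);
		puts("freed");
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	atomic_init(&n->ref_count, 1);	/* creator's reference */
	node_get(n);			/* second user */
	node_put(n);			/* back to 1, object stays */
	node_put(n);			/* drops to 0, object is freed */
	return 0;
}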
-static inline void -fw_node_put(struct fw_node *node) +static inline void fw_node_put(struct fw_node *node) { if (atomic_dec_and_test(&node->ref_count)) kfree(node); } -void -fw_destroy_nodes(struct fw_card *card); - -int -fw_compute_block_crc(u32 *block); +struct fw_card; +void fw_destroy_nodes(struct fw_card *card); +int fw_compute_block_crc(u32 *block); #endif /* __fw_topology_h */ diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 699ac04..283dac6 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c @@ -64,10 +64,8 @@ #define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) #define PHY_IDENTIFIER(id) ((id) << 30) -static int -close_transaction(struct fw_transaction *transaction, - struct fw_card *card, int rcode, - u32 *payload, size_t length) +static int close_transaction(struct fw_transaction *transaction, + struct fw_card *card, int rcode) { struct fw_transaction *t; unsigned long flags; @@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction, spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { - t->callback(card, rcode, payload, length, t->callback_data); + t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } @@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction, * Only valid for transactions that are potentially pending (ie have * been sent). */ -int -fw_cancel_transaction(struct fw_card *card, - struct fw_transaction *transaction) +int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction) { /* * Cancel the packet transmission if it's still queued. That @@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card, * if the transaction is still pending and remove it in that case. */ - return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); + return close_transaction(transaction, card, RCODE_CANCELLED); } EXPORT_SYMBOL(fw_cancel_transaction); -static void -transmit_complete_callback(struct fw_packet *packet, - struct fw_card *card, int status) +static void transmit_complete_callback(struct fw_packet *packet, + struct fw_card *card, int status) { struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); switch (status) { case ACK_COMPLETE: - close_transaction(t, card, RCODE_COMPLETE, NULL, 0); + close_transaction(t, card, RCODE_COMPLETE); break; case ACK_PENDING: t->timestamp = packet->timestamp; @@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet, case ACK_BUSY_X: case ACK_BUSY_A: case ACK_BUSY_B: - close_transaction(t, card, RCODE_BUSY, NULL, 0); + close_transaction(t, card, RCODE_BUSY); break; case ACK_DATA_ERROR: - close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); + close_transaction(t, card, RCODE_DATA_ERROR); break; case ACK_TYPE_ERROR: - close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); + close_transaction(t, card, RCODE_TYPE_ERROR); break; default: /* * In this case the ack is really a juju specific * rcode, so just forward that to the callback. 
*/ - close_transaction(t, card, status, NULL, 0); + close_transaction(t, card, status); break; } } -static void -fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, +static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, int destination_id, int source_id, int generation, int speed, unsigned long long offset, void *payload, size_t length) { int ext_tcode; + if (tcode == TCODE_STREAM_DATA) { + packet->header[0] = + HEADER_DATA_LENGTH(length) | + destination_id | + HEADER_TCODE(TCODE_STREAM_DATA); + packet->header_length = 4; + packet->payload = payload; + packet->payload_length = length; + + goto common; + } + if (tcode > 0x10) { ext_tcode = tcode & ~0x10; tcode = TCODE_LOCK_REQUEST; @@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, packet->payload_length = 0; break; } - + common: packet->speed = speed; packet->generation = generation; packet->ack = 0; @@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, * @param callback function to be called when the transaction is completed * @param callback_data pointer to arbitrary data, which will be * passed to the callback + * + * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller + * needs to synthesize @destination_id with fw_stream_packet_destination_id(). */ -void -fw_send_request(struct fw_card *card, struct fw_transaction *t, - int tcode, int destination_id, int generation, int speed, - unsigned long long offset, - void *payload, size_t length, - fw_transaction_callback_t callback, void *callback_data) +void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, + int destination_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data) { unsigned long flags; int tlabel; @@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode, * Returns the RCODE. 
*/ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, - int generation, int speed, unsigned long long offset, - void *data, size_t length) + int generation, int speed, unsigned long long offset, + void *payload, size_t length) { struct transaction_callback_data d; struct fw_transaction t; init_completion(&d.done); - d.payload = data; + d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, - offset, data, length, transaction_callback, &d); + offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); return d.rcode; @@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card) } } -static struct fw_address_handler * -lookup_overlapping_address_handler(struct list_head *list, - unsigned long long offset, size_t length) +static struct fw_address_handler *lookup_overlapping_address_handler( + struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; @@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list, return NULL; } -static struct fw_address_handler * -lookup_enclosing_address_handler(struct list_head *list, - unsigned long long offset, size_t length) +static struct fw_address_handler *lookup_enclosing_address_handler( + struct list_head *list, unsigned long long offset, size_t length) { struct fw_address_handler *handler; @@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region = #endif /* 0 */ /** - * Allocate a range of addresses in the node space of the OHCI - * controller. When a request is received that falls within the - * specified address range, the specified callback is invoked. The - * parameters passed to the callback give the details of the - * particular request. + * fw_core_add_address_handler - register for incoming requests + * @handler: callback + * @region: region in the IEEE 1212 node space address range + * + * region->start, ->end, and handler->length have to be quadlet-aligned. + * + * When a request is received that falls within the specified address range, + * the specified callback is invoked. The parameters passed to the callback + * give the details of the particular request. * * Return value: 0 on success, non-zero otherwise. * The start offset of the handler's address region is determined by * fw_core_add_address_handler() and is returned in handler->offset. - * The offset is quadlet-aligned. 
*/ -int -fw_core_add_address_handler(struct fw_address_handler *handler, - const struct fw_address_region *region) +int fw_core_add_address_handler(struct fw_address_handler *handler, + const struct fw_address_region *region) { struct fw_address_handler *other; unsigned long flags; int ret = -EBUSY; + if (region->start & 0xffff000000000003ULL || + region->end & 0xffff000000000003ULL || + region->start >= region->end || + handler->length & 3 || + handler->length == 0) + return -EINVAL; + spin_lock_irqsave(&address_handler_lock, flags); - handler->offset = roundup(region->start, 4); + handler->offset = region->start; while (handler->offset + handler->length <= region->end) { other = lookup_overlapping_address_handler(&address_handler_list, handler->offset, handler->length); if (other != NULL) { - handler->offset = - roundup(other->offset + other->length, 4); + handler->offset += other->length; } else { list_add_tail(&handler->link, &address_handler_list); ret = 0; @@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler, EXPORT_SYMBOL(fw_core_add_address_handler); /** - * Deallocate a range of addresses allocated with fw_allocate. This - * will call the associated callback one last time with a the special - * tcode TCODE_DEALLOCATE, to let the client destroy the registered - * callback data. For convenience, the callback parameters offset and - * length are set to the start and the length respectively for the - * deallocated region, payload is set to NULL. + * fw_core_remove_address_handler - unregister an address handler */ void fw_core_remove_address_handler(struct fw_address_handler *handler) { @@ -518,9 +527,8 @@ struct fw_request { u32 data[0]; }; -static void -free_response_callback(struct fw_packet *packet, - struct fw_card *card, int status) +static void free_response_callback(struct fw_packet *packet, + struct fw_card *card, int status) { struct fw_request *request; @@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet, kfree(request); } -void -fw_fill_response(struct fw_packet *response, u32 *request_header, - int rcode, void *payload, size_t length) +void fw_fill_response(struct fw_packet *response, u32 *request_header, + int rcode, void *payload, size_t length) { int tcode, tlabel, extended_tcode, source, destination; @@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header, } EXPORT_SYMBOL(fw_fill_response); -static struct fw_request * -allocate_request(struct fw_packet *p) +static struct fw_request *allocate_request(struct fw_packet *p) { struct fw_request *request; u32 *data, length; @@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p) return request; } -void -fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode) { /* unified transaction or broadcast transaction: don't respond */ if (request->ack != ACK_PENDING || @@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) } EXPORT_SYMBOL(fw_send_response); -void -fw_core_handle_request(struct fw_card *card, struct fw_packet *p) +void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) { struct fw_address_handler *handler; struct fw_request *request; @@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p) } EXPORT_SYMBOL(fw_core_handle_request); -void -fw_core_handle_response(struct fw_card *card, struct fw_packet *p) +void fw_core_handle_response(struct fw_card 
*card, struct fw_packet *p) { struct fw_transaction *t; unsigned long flags; @@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region = { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; -static void -handle_topology_map(struct fw_card *card, struct fw_request *request, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void handle_topology_map(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + int speed, unsigned long long offset, + void *payload, size_t length, void *callback_data) { int i, start, end; __be32 *map; @@ -832,12 +834,10 @@ static const struct fw_address_region registers_region = { .start = CSR_REGISTER_BASE, .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; -static void -handle_registers(struct fw_card *card, struct fw_request *request, - int tcode, int destination, int source, - int generation, int speed, - unsigned long long offset, - void *payload, size_t length, void *callback_data) +static void handle_registers(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + int speed, unsigned long long offset, + void *payload, size_t length, void *callback_data) { int reg = offset & ~CSR_REGISTER_BASE; unsigned long long bus_time; @@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = { static int __init fw_core_init(void) { - int retval; + int ret; - retval = bus_register(&fw_bus_type); - if (retval < 0) - return retval; + ret = bus_register(&fw_bus_type); + if (ret < 0) + return ret; fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); if (fw_cdev_major < 0) { @@ -951,19 +951,10 @@ static int __init fw_core_init(void) return fw_cdev_major; } - retval = fw_core_add_address_handler(&topology_map, - &topology_map_region); - BUG_ON(retval < 0); - - retval = fw_core_add_address_handler(®isters, - ®isters_region); - BUG_ON(retval < 0); - - /* Add the vendor textual descriptor. */ - retval = fw_core_add_descriptor(&vendor_id_descriptor); - BUG_ON(retval < 0); - retval = fw_core_add_descriptor(&model_id_descriptor); - BUG_ON(retval < 0); + fw_core_add_address_handler(&topology_map, &topology_map_region); + fw_core_add_address_handler(®isters, ®isters_region); + fw_core_add_descriptor(&vendor_id_descriptor); + fw_core_add_descriptor(&model_id_descriptor); return 0; } diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index 1d78e9c..dfa7990 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h @@ -82,14 +82,14 @@ #define CSR_SPEED_MAP 0x2000 #define CSR_SPEED_MAP_END 0x3000 +#define BANDWIDTH_AVAILABLE_INITIAL 4915 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) #define BROADCAST_CHANNEL_VALID (1 << 30) #define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) #define fw_error(s, args...) 
printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) -static inline void -fw_memcpy_from_be32(void *_dst, void *_src, size_t size) +static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size) { u32 *dst = _dst; __be32 *src = _src; @@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size) dst[i] = be32_to_cpu(src[i]); } -static inline void -fw_memcpy_to_be32(void *_dst, void *_src, size_t size) +static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size) { fw_memcpy_from_be32(_dst, _src, size); } @@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet, struct fw_card *card, int status); typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, - void *data, - size_t length, + void *data, size_t length, void *callback_data); /* @@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card, void *data, size_t length, void *callback_data); -typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle, - int node_id, int generation, - u32 *self_ids, - int self_id_count, - void *callback_data); - struct fw_packet { int speed; int generation; @@ -187,12 +179,6 @@ struct fw_transaction { void *callback_data; }; -static inline struct fw_packet * -fw_packet(struct list_head *l) -{ - return list_entry(l, struct fw_packet, link); -} - struct fw_address_handler { u64 offset; size_t length; @@ -201,7 +187,6 @@ struct fw_address_handler { struct list_head link; }; - struct fw_address_region { u64 start; u64 end; @@ -255,6 +240,7 @@ struct fw_card { int bm_retries; int bm_generation; + bool broadcast_channel_allocated; u32 broadcast_channel; u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; }; @@ -315,10 +301,8 @@ struct fw_iso_packet { struct fw_iso_context; typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, - u32 cycle, - size_t header_length, - void *header, - void *data); + u32 cycle, size_t header_length, + void *header, void *data); /* * An iso buffer is just a set of pages mapped for DMA in the @@ -344,36 +328,25 @@ struct fw_iso_context { void *callback_data; }; -int -fw_iso_buffer_init(struct fw_iso_buffer *buffer, - struct fw_card *card, - int page_count, - enum dma_data_direction direction); -int -fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); -void -fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); - -struct fw_iso_context * -fw_iso_context_create(struct fw_card *card, int type, - int channel, int speed, size_t header_size, - fw_iso_callback_t callback, void *callback_data); - -void -fw_iso_context_destroy(struct fw_iso_context *ctx); - -int -fw_iso_context_queue(struct fw_iso_context *ctx, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload); - -int -fw_iso_context_start(struct fw_iso_context *ctx, - int cycle, int sync, int tags); - -int -fw_iso_context_stop(struct fw_iso_context *ctx); +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction); +int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); +void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data); +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + 
unsigned long payload); +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags); +int fw_iso_context_stop(struct fw_iso_context *ctx); +void fw_iso_context_destroy(struct fw_iso_context *ctx); + +void fw_iso_resource_manage(struct fw_card *card, int generation, + u64 channels_mask, int *channel, int *bandwidth, bool allocate); struct fw_card_driver { /* @@ -415,7 +388,7 @@ struct fw_card_driver { struct fw_iso_context * (*allocate_iso_context)(struct fw_card *card, - int type, size_t header_size); + int type, int channel, size_t header_size); void (*free_iso_context)(struct fw_iso_context *ctx); int (*start_iso)(struct fw_iso_context *ctx, @@ -429,54 +402,45 @@ struct fw_card_driver { int (*stop_iso)(struct fw_iso_context *ctx); }; -int -fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); +int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset); -void -fw_send_request(struct fw_card *card, struct fw_transaction *t, +void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, - unsigned long long offset, void *data, size_t length, + unsigned long long offset, void *payload, size_t length, fw_transaction_callback_t callback, void *callback_data); - -int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, - int generation, int speed, unsigned long long offset, - void *data, size_t length); - int fw_cancel_transaction(struct fw_card *card, struct fw_transaction *transaction); - void fw_flush_transactions(struct fw_card *card); - +int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *payload, size_t length); void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count); +static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) +{ + return tag << 14 | channel << 8 | sy; +} + /* * Called by the topology code to inform the device code of node * activity; found, lost, or updated nodes. 
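
The fw_stream_packet_destination_id() helper added above packs the asynchronous stream addressing fields into one destination ID (tag << 14 | channel << 8 | sy). A minimal illustration follows, with values chosen for this sketch rather than taken from the patch (channel 31 is the broadcast channel):

/* tag 1, broadcast channel 31, sy 0:  0x4000 | 0x1f00 | 0x0 == 0x5f00 */
static inline int example_broadcast_stream_id(void)
{
	return fw_stream_packet_destination_id(1, 31, 0);
}
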
*/ -void -fw_node_event(struct fw_card *card, struct fw_node *node, int event); +void fw_node_event(struct fw_card *card, struct fw_node *node, int event); /* API used by card level drivers */ -void -fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, - struct device *device); -int -fw_card_add(struct fw_card *card, - u32 max_receive, u32 link_speed, u64 guid); - -void -fw_core_remove_card(struct fw_card *card); - -void -fw_core_handle_bus_reset(struct fw_card *card, - int node_id, int generation, - int self_id_count, u32 *self_ids); -void -fw_core_handle_request(struct fw_card *card, struct fw_packet *request); - -void -fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); +void fw_card_initialize(struct fw_card *card, + const struct fw_card_driver *driver, struct device *device); +int fw_card_add(struct fw_card *card, + u32 max_receive, u32 link_speed, u64 guid); +void fw_core_remove_card(struct fw_card *card); +void fw_core_handle_bus_reset(struct fw_card *card, int node_id, + int generation, int self_id_count, u32 *self_ids); +void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); +void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); + +extern int fw_irm_set_broadcast_channel_register(struct device *dev, + void *data); #endif /* __fw_transaction_h */ diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c index 31400c8..d696f69 100644 --- a/drivers/ieee1394/csr.c +++ b/drivers/ieee1394/csr.c @@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = { .host_reset = host_reset, }; -const static struct hpsb_address_ops map_ops = { +static const struct hpsb_address_ops map_ops = { .read = read_maps, }; -const static struct hpsb_address_ops fcp_ops = { +static const struct hpsb_address_ops fcp_ops = { .write = write_fcp, }; -const static struct hpsb_address_ops reg_ops = { +static const struct hpsb_address_ops reg_ops = { .read = read_regs, .write = write_regs, .lock = lock_regs, .lock64 = lock64_regs, }; -const static struct hpsb_address_ops config_rom_ops = { +static const struct hpsb_address_ops config_rom_ops = { .read = read_config_rom, }; diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index cb15bfa..823a629 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops= * Export information about protocols/devices supported by this driver. 
*/ #ifdef MODULE -static struct ieee1394_device_id dv1394_id_table[] = { +static const struct ieee1394_device_id dv1394_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 1a919df..4ca1035 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host); static void ether1394_host_reset(struct hpsb_host *host); /* Function for incoming 1394 packets */ -const static struct hpsb_address_ops addr_ops = { +static const struct hpsb_address_ops addr_ops = { .write = ether1394_write, }; @@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud) return eth1394_new_node(hi, ud); } -static struct ieee1394_device_id eth1394_id_table[] = { +static const struct ieee1394_device_id eth1394_id_table[] = { { .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION), diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 600e391..4bc4435 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c @@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, return retval; } -const static struct hpsb_address_ops dummy_ops; +static const struct hpsb_address_ops dummy_ops; /* dummy address spaces as lower and upper bounds of the host's a.s. list */ static void init_hpsb_highlevel(struct hpsb_host *host) diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index 53aada5..a6d55be 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = { static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) { struct hpsb_protocol_driver *driver; - struct ieee1394_device_id *id; + const struct ieee1394_device_id *id; int length = 0; char *scratch = buf; @@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv) { struct hpsb_protocol_driver *driver; struct unit_directory *ud; - struct ieee1394_device_id *id; + const struct ieee1394_device_id *id; /* We only match unit directories */ if (dev->platform_data != &nodemgr_ud_platform_data) diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index ee5acdb..749b271 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h @@ -125,7 +125,7 @@ struct hpsb_protocol_driver { * probe function below can implement further protocol * dependent or vendor dependent checking. 
*/ - struct ieee1394_device_id *id_table; + const struct ieee1394_device_id *id_table; /* * The update function is called when the node has just diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index bad66c6..da5f882 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags); -const static struct hpsb_address_ops arm_ops = { +static const struct hpsb_address_ops arm_ops = { .read = arm_read, .write = arm_write, .lock = arm_lock, @@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf) { struct compat_raw1394_req __user *cr = (typeof(cr)) buf; struct raw1394_request __user *r; + r = compat_alloc_user_space(sizeof(struct raw1394_request)); #define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) @@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf) C(tag) || C(sendb) || C(recvb)) - return ERR_PTR(-EFAULT); + return (__force const char __user *)ERR_PTR(-EFAULT); + return (const char __user *)r; } #undef C @@ -389,6 +391,7 @@ static int raw1394_compat_read(const char __user *buf, struct raw1394_request *r) { struct compat_raw1394_req __user *cr = (typeof(cr)) buf; + if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || P(type) || P(error) || @@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r) P(sendb) || P(recvb)) return -EFAULT; + return sizeof(struct compat_raw1394_req); } #undef P @@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer, sizeof(struct compat_raw1394_req) != sizeof(struct raw1394_request)) { buffer = raw1394_compat_write(buffer); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); + if (IS_ERR((__force void *)buffer)) + return PTR_ERR((__force void *)buffer); } else #endif if (count != sizeof(struct raw1394_request)) { @@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file) * Export information about protocols/devices supported by this driver. 
*/ #ifdef MODULE -static struct ieee1394_device_id raw1394_id_table[] = { +static const struct ieee1394_device_id raw1394_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f3fd865..a51ab23 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = { .host_reset = sbp2_host_reset, }; -const static struct hpsb_address_ops sbp2_ops = { +static const struct hpsb_address_ops sbp2_ops = { .write = sbp2_handle_status_write }; @@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *, static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, size_t, u16); -const static struct hpsb_address_ops sbp2_physdma_ops = { +static const struct hpsb_address_ops sbp2_physdma_ops = { .read = sbp2_handle_physdma_read, .write = sbp2_handle_physdma_write, }; @@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = { /* * Interface to driver core and IEEE 1394 core */ -static struct ieee1394_device_id sbp2_id_table[] = { +static const struct ieee1394_device_id sbp2_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, @@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu, "(firmware_revision 0x%06x, vendor_id 0x%06x," " model_id 0x%06x)", NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), - workarounds, firmware_revision, - ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, + workarounds, firmware_revision, ud->vendor_id, model); /* We would need one SCSI host template for each target to adjust diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 679a918..d287ba7 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops= * Export information about protocols/devices supported by this driver. */ #ifdef MODULE -static struct ieee1394_device_id video1394_id_table[] = { +static const struct ieee1394_device_id video1394_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c index b55d9cc..32526f1 100644 --- a/drivers/media/dvb/firewire/firedtv-avc.c +++ b/drivers/media/dvb/firewire/firedtv-avc.c @@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype) } static const char *debug_fcp_opcode(unsigned int opcode, - const u8 *data, size_t length) + const u8 *data, int length) { switch (opcode) { case AVC_OPCODE_VENDOR: break; @@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode, case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; + case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2"; case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; } return "Vendor"; } -static void debug_fcp(const u8 *data, size_t length) +static void debug_fcp(const u8 *data, int length) { unsigned int subunit_type, subunit_id, op; const char *prefix = data[0] > 7 ? 
"FCP <- " : "FCP -> "; @@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv, c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; - c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; + if (fdtv->type == FIREDTV_DVB_S2) + c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2; + else + c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; c->operand[4] = (params->frequency >> 24) & 0xff; c->operand[5] = (params->frequency >> 16) & 0xff; diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 4d078e9..c6b3ca3 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -25,10 +25,12 @@ #include <linux/types.h> #include <linux/firewire-constants.h> -#define FW_CDEV_EVENT_BUS_RESET 0x00 -#define FW_CDEV_EVENT_RESPONSE 0x01 -#define FW_CDEV_EVENT_REQUEST 0x02 -#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_BUS_RESET 0x00 +#define FW_CDEV_EVENT_RESPONSE 0x01 +#define FW_CDEV_EVENT_REQUEST 0x02 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 +#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 /** * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types @@ -136,7 +138,24 @@ struct fw_cdev_event_request { * This event is sent when the controller has completed an &fw_cdev_iso_packet * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers * stripped of all packets up until and including the interrupt packet are - * returned in the @header field. + * returned in the @header field. The amount of header data per packet is as + * specified at iso context creation by &fw_cdev_create_iso_context.header_size. + * + * In version 1 of this ABI, header data consisted of the 1394 isochronous + * packet header, followed by quadlets from the packet payload if + * &fw_cdev_create_iso_context.header_size > 4. + * + * In version 2 of this ABI, header data consist of the 1394 isochronous + * packet header, followed by a timestamp quadlet if + * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the + * packet payload if &fw_cdev_create_iso_context.header_size > 8. + * + * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. + * + * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, + * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: + * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte + * order. */ struct fw_cdev_event_iso_interrupt { __u64 closure; @@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt { }; /** + * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl + * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * @handle: Reference by which an allocated resource can be deallocated + * @channel: Isochronous channel which was (de)allocated, if any + * @bandwidth: Bandwidth allocation units which were (de)allocated, if any + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous + * resource was allocated at the IRM. The client has to check @channel and + * @bandwidth for whether the allocation actually succeeded. + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous + * resource was deallocated at the IRM. 
It is also sent when automatic + * reallocation after a bus reset failed. + * + * @channel is <0 if no channel was (de)allocated or if reallocation failed. + * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed. + */ +struct fw_cdev_event_iso_resource { + __u64 closure; + __u32 type; + __u32 handle; + __s32 channel; + __s32 bandwidth; +}; + +/** * union fw_cdev_event - Convenience union of fw_cdev_event_ types * @common: Valid for all types * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT + * @iso_resource: Valid if @common.type == + * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED * * Convenience union for userspace use. Events could be read(2) into an * appropriately aligned char buffer and then cast to this union for further @@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt { * not fit will be discarded so that the next read(2) will return a new event. */ union fw_cdev_event { - struct fw_cdev_event_common common; - struct fw_cdev_event_bus_reset bus_reset; - struct fw_cdev_event_response response; - struct fw_cdev_event_request request; - struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_common common; + struct fw_cdev_event_bus_reset bus_reset; + struct fw_cdev_event_response response; + struct fw_cdev_event_request request; + struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_iso_resource iso_resource; }; -#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) -#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) -#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) -#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) -#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) -#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) -#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) -#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +/* available since kernel version 2.6.22 */ +#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) +#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) +#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) +#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) +#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) +#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) +#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) +#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) +#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) +#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) -#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) -#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) -#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) -#define 
FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) -#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) +/* available since kernel version 2.6.24 */ +#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) -/* FW_CDEV_VERSION History - * - * 1 Feb 18, 2007: Initial version. +/* available since kernel version 2.6.30 */ +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */ +#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) +#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet) + +/* + * FW_CDEV_VERSION History + * 1 (2.6.22) - initial version + * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if + * &fw_cdev_create_iso_context.header_size is 8 or more */ -#define FW_CDEV_VERSION 1 +#define FW_CDEV_VERSION 2 /** * struct fw_cdev_get_info - General purpose information ioctl @@ -201,7 +266,7 @@ union fw_cdev_event { * case, @rom_length is updated with the actual length of the * configuration ROM. * @rom: If non-zero, address of a buffer to be filled by a copy of the - * local node's configuration ROM + * device's configuration ROM * @bus_reset: If non-zero, address of a buffer to be filled by a * &struct fw_cdev_event_bus_reset with the current state * of the bus. This does not cause a bus reset to happen. @@ -229,7 +294,7 @@ struct fw_cdev_get_info { * Send a request to the device. This ioctl implements all outgoing requests. * Both quadlet and block request specify the payload as a pointer to the data * in the @data field. Once the transaction completes, the kernel writes an - * &fw_cdev_event_request event back. The @closure field is passed back to + * &fw_cdev_event_response event back. The @closure field is passed back to * user space in the response event. */ struct fw_cdev_send_request { @@ -284,9 +349,9 @@ struct fw_cdev_allocate { }; /** - * struct fw_cdev_deallocate - Free an address range allocation - * @handle: Handle to the address range, as returned by the kernel when the - * range was allocated + * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource + * @handle: Handle to the address range or iso resource, as returned by the + * kernel when the range or resource was allocated */ struct fw_cdev_deallocate { __u32 handle; @@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset { * If successful, the kernel adds the descriptor and writes back a handle to the * kernel-side object to be used for later removal of the descriptor block and * immediate key. + * + * This ioctl affects the configuration ROMs of all local nodes. + * The ioctl only succeeds on device files which represent a local node. */ struct fw_cdev_add_descriptor { __u32 immediate; @@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor { * descriptor was added * * Remove a descriptor block and accompanying immediate key from the local - * node's configuration ROM. + * nodes' configuration ROMs. 
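
To make the new event types and the read(2)-based event delivery described above concrete, a minimal user-space sketch follows; it is illustrative only, not part of the patch. The device path and buffer size are assumptions, and the header_length/header[] members of fw_cdev_event_iso_interrupt come from the unchanged part of this header.

#include <arpa/inet.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

static void dispatch_events(int fd)
{
	__u64 buffer[512];	/* 8-byte aligned; room for trailing header data */
	union fw_cdev_event *e = (union fw_cdev_event *)buffer;

	while (read(fd, buffer, sizeof(buffer)) > 0) {
		switch (e->common.type) {
		case FW_CDEV_EVENT_BUS_RESET:
			printf("bus reset\n");
			break;
		case FW_CDEV_EVENT_ISO_INTERRUPT:
			/*
			 * With ABI version 2 and header_size >= 4, each packet
			 * contributes a big-endian iso header quadlet:
			 * 16 bits len, 2 bits tag, 6 bits channel,
			 * 4 bits tcode, 4 bits sy.
			 */
			if (e->iso_interrupt.header_length >= 4) {
				__u32 h = ntohl(e->iso_interrupt.header[0]);

				printf("iso rx: len %u, channel %u, sy %u\n",
				       h >> 16, (h >> 8) & 0x3f, h & 0xf);
			}
			break;
		case FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED:
			/* Allocation succeeded only if channel/bandwidth came back. */
			printf("iso resource: channel %d, bandwidth %d, handle %u\n",
			       e->iso_resource.channel, e->iso_resource.bandwidth,
			       e->iso_resource.handle);
			break;
		case FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED:
			printf("iso resource gone (freed or lost after bus reset)\n");
			break;
		default:
			break;
		}
	}
}

int main(void)
{
	int fd = open("/dev/fw0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;
	dispatch_events(fd);
	close(fd);
	return 0;
}
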
*/ struct fw_cdev_remove_descriptor { __u32 handle; @@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor { * * If a context was successfully created, the kernel writes back a handle to the * context, which must be passed in for subsequent operations on that context. + * + * Note that the effect of a @header_size > 4 depends on + * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. */ struct fw_cdev_create_iso_context { __u32 type; @@ -473,10 +544,91 @@ struct fw_cdev_stop_iso { * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer * and also the system clock. This allows to express the receive time of an * isochronous packet as a system time with microsecond accuracy. + * + * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and + * 12 bits cycleOffset, in host byte order. */ struct fw_cdev_get_cycle_timer { __u64 local_time; __u32 cycle_timer; }; +/** + * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth + * @closure: Passed back to userspace in correponding iso resource events + * @channels: Isochronous channels of which one is to be (de)allocated + * @bandwidth: Isochronous bandwidth units to be (de)allocated + * @handle: Handle to the allocation, written by the kernel (only valid in + * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls) + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an + * isochronous channel and/or of isochronous bandwidth at the isochronous + * resource manager (IRM). Only one of the channels specified in @channels is + * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after + * communication with the IRM, indicating success or failure in the event data. + * The kernel will automatically reallocate the resources after bus resets. + * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event + * will be sent. The kernel will also automatically deallocate the resources + * when the file descriptor is closed. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate + * deallocation of resources which were allocated as described above. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation + * without automatic re- or deallocation. + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation, + * indicating success or failure in its data. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like + * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed + * instead of allocated. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources + * for the lifetime of the fd or handle. + * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources + * for the duration of a bus generation. + * + * @channels is a host-endian bitfield with the least significant bit + * representing channel 0 and the most significant bit representing channel 63: + * 1ULL << c for each channel c that is a candidate for (de)allocation. + * + * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send + * one quadlet of data (payload or header data) at speed S1600. 
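
A small sketch (illustrative, not part of the patch) of how a client might decode the cycle_timer word documented above into its cycleSeconds/cycleCount/cycleOffset fields; fd is assumed to be an open firewire character device.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

static int print_cycle_time(int fd)
{
	struct fw_cdev_get_cycle_timer ct;

	if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ct) < 0)
		return -1;

	/* 7 bits cycleSeconds, 13 bits cycleCount, 12 bits cycleOffset */
	printf("system time %llu, cycle %u.%u.%u\n",
	       (unsigned long long)ct.local_time,
	       ct.cycle_timer >> 25,
	       (ct.cycle_timer >> 12) & 0x1fff,
	       ct.cycle_timer & 0xfff);
	return 0;
}
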
+ */ +struct fw_cdev_allocate_iso_resource { + __u64 closure; + __u64 channels; + __u32 bandwidth; + __u32 handle; +}; + +/** + * struct fw_cdev_send_stream_packet - send an asynchronous stream packet + * @length: Length of outgoing payload, in bytes + * @tag: Data format tag + * @channel: Isochronous channel to transmit to + * @sy: Synchronization code + * @closure: Passed back to userspace in the response event + * @data: Userspace pointer to payload + * @generation: The bus generation where packet is valid + * @speed: Speed to transmit at + * + * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet + * to every device which is listening to the specified channel. The kernel + * writes an &fw_cdev_event_response event which indicates success or failure of + * the transmission. + */ +struct fw_cdev_send_stream_packet { + __u32 length; + __u32 tag; + __u32 channel; + __u32 sy; + __u64 closure; + __u64 data; + __u32 generation; + __u32 speed; +}; + #endif /* _LINUX_FIREWIRE_CDEV_H */ |
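
Finally, a minimal user-space sketch (illustrative, not part of the patch) that ties the two new ioctls together: request one of several candidate channels plus bandwidth at the IRM, then transmit an asynchronous stream packet. The tag, bandwidth, speed and payload handling here are placeholder assumptions; a real client takes the generation from a bus reset event and waits for the FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event before using the channel.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>
#include <linux/firewire-constants.h>

static int request_iso_resources(int fd)
{
	struct fw_cdev_allocate_iso_resource req;

	memset(&req, 0, sizeof(req));
	req.channels  = 0xfULL;	/* candidates: any one of channels 0..3 (1ULL << c) */
	req.bandwidth = 4096;	/* bandwidth allocation units, placeholder value */

	/* Which channel and how much bandwidth were granted arrives later as
	 * an FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event, not as ioctl output. */
	return ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req);
}

static int send_stream_packet(int fd, int channel, int generation,
			      const void *payload, __u32 length)
{
	struct fw_cdev_send_stream_packet p;

	memset(&p, 0, sizeof(p));
	p.length     = length;
	p.tag        = 1;			/* placeholder data format tag */
	p.channel    = channel;
	p.sy         = 0;
	p.data       = (__u64)(unsigned long)payload;
	p.generation = generation;
	p.speed      = SCODE_400;		/* from linux/firewire-constants.h */

	/* Completion is reported via an fw_cdev_event_response event. */
	return ioctl(fd, FW_CDEV_IOC_SEND_STREAM_PACKET, &p);
}
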