/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4

struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        wait_queue_head_t tx_flush_wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};

static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};

struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};

struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};

struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};

struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};

struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        __be32 transaction_data[2];
        struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!schedule_delayed_work(&r->work, delay))
                client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};

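/*
 * Illustrative check (not in the driver): the first-field invariant
 * described above could be asserted at compile time for any of these
 * containers, e.g.
 *
 *	BUILD_BUG_ON(offsetof(struct bus_reset_event, event) != 0);
 *	BUILD_BUG_ON(offsetof(struct inbound_phy_packet_event, event) != 0);
 *
 * With struct event at offset 0, the pointer kfree()'d by dequeue_event()
 * equals the address of the containing allocation.
 */
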
static inline void __user *u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}

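/*
 * Note on the casts above: user pointers travel through the fixed-width
 * __u64 fields of the cdev ABI structs. Casting via unsigned long (the
 * native pointer width) is exact on 64-bit and avoids size-mismatch
 * warnings on 32-bit. A userspace client fills such a field the same
 * way, e.g. (illustrative only):
 *
 *	struct fw_cdev_event_bus_reset reset;
 *	get_info.bus_reset = (__u64)(unsigned long)&reset;
 */
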
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        init_waitqueue_head(&client->tx_flush_wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        kref_init(&client->kref);

        file->private_data = client;

        mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
        mutex_unlock(&device->client_list_mutex);

        return nonseekable_open(inode, file);
}

static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure = client->bus_reset_closure;
        event->type = FW_CDEV_EVENT_BUS_RESET;
        event->generation = client->device->generation;
        event->node_id = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id = card->bm_node_id;
        event->irm_node_id = card->irm_node->node_id;
        event->root_node_id = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}

static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}

union ioctl_arg {
        struct fw_cdev_get_info get_info;
        struct fw_cdev_send_request send_request;
        struct fw_cdev_allocate allocate;
        struct fw_cdev_deallocate deallocate;
        struct fw_cdev_send_response send_response;
        struct fw_cdev_initiate_bus_reset initiate_bus_reset;
        struct fw_cdev_add_descriptor add_descriptor;
        struct fw_cdev_remove_descriptor remove_descriptor;
        struct fw_cdev_create_iso_context create_iso_context;
        struct fw_cdev_queue_iso queue_iso;
        struct fw_cdev_start_iso start_iso;
        struct fw_cdev_stop_iso stop_iso;
        struct fw_cdev_get_cycle_timer get_cycle_timer;
        struct fw_cdev_allocate_iso_resource allocate_iso_resource;
        struct fw_cdev_send_stream_packet send_stream_packet;
        struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
        struct fw_cdev_send_phy_packet send_phy_packet;
        struct fw_cdev_receive_phy_packets receive_phy_packets;
        struct fw_cdev_set_iso_channels set_iso_channels;
};

static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(u64_to_uptr(a->bus_reset),
                                 &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        return 0;
}

static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0) {
                client_get(client);
                schedule_if_iso_resource(resource);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}

static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        idr_remove(&client->resource_idr, e->r.resource.handle);
        if (client->in_shutdown)
                wake_up(&client->tx_flush_wait);
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the idr's reference */
        client_put(client);
        /* Drop the transaction callback's reference */
        client_put(client);
}

static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        /* Get a reference for the transaction callback */
        client_get(client);

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}

static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}

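/*
 * Background note: core-transaction.c delivers writes to the FCP command
 * and response registers with a NULL struct fw_request, because the core
 * has already sent the write response itself. That convention is what
 * makes the NULL test above sufficient to tell FCP traffic apart from
 * inbound transactions that still need fw_send_response() later.
 */
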
static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                goto failed;
        }
        r->card = card;
        r->request = request;
        r->data = payload;
        r->length = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type = FW_CDEV_EVENT_REQUEST;
                req->tcode = tcode;
                req->offset = offset;
                req->length = length;
                req->handle = r->resource.handle;
                req->closure = handler->closure;
                event_size0 = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type = FW_CDEV_EVENT_REQUEST2;
                req->tcode = tcode;
                req->offset = offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card = card->index;
                req->generation = generation;
                req->length = length;
                req->handle = r->resource.handle;
                req->closure = handler->closure;
                event_size0 = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}

static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
                container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data = r;
        r->closure = a->closure;
        r->client = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);

        return 0;
}

static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key = a->key;
        r->descriptor.data = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}

static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure = client->iso_closure;
        e->interrupt.cycle = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}

static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);
                return -EBUSY;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}

static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)

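/*
 * Illustrative example (not part of the driver): userspace composes this
 * control quadlet with the matching FW_CDEV_ISO_* macros from
 * linux/firewire-cdev.h, e.g. for a transmit packet with a 256-byte
 * payload, an 8-byte user header, and an interrupt on completion:
 *
 *	struct fw_cdev_iso_packet p;
 *	p.control = FW_CDEV_ISO_PAYLOAD_LENGTH(256)
 *		  | FW_CDEV_ISO_INTERRUPT
 *		  | FW_CDEV_ISO_HEADER_LENGTH(8);
 *
 * On this side, GET_PAYLOAD_LENGTH(p.control) then yields 256 again.
 */
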
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        a->size -= uptr_to_u64(p) - a->packets;
        a->packets = uptr_to_u64(p);
        a->data = client->vm_start + payload;

        return count;
}

static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}

static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec = ts.tv_sec;
        a->tv_nsec = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}

static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}

static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE,
                        r->transaction_data);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_find(&client->resource_idr, r->resource.handle)) {
                idr_remove(&client->resource_idr, r->resource.handle);
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle = r->resource.handle;
        e->iso_resource.channel = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}

static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
            request->bandwidth < 0)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client = client;
        r->todo = todo;
        r->generation = -1;
        r->channels = request->channels;
        r->bandwidth = request->bandwidth;
        r->e_alloc = e1;
        r->e_dealloc = e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}

static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode = TCODE_STREAM_DATA;
        request.length = a->length;
        request.closure = a->closure;
        request.data = a->data;
        request.generation = a->generation;

        return init_request(client, &request, dest, a->speed);
}

static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
        case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
        case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:		e->phy_packet.rcode = status;		break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}

static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client = client;
        e->p.speed = SCODE_100;
        e->p.generation = a->generation;
        e->p.header[0] = TCODE_LINK_INTERNAL << 4;
        e->p.header[1] = a->data[0];
        e->p.header[2] = a->data[1];
        e->p.header_length = 12;
        e->p.callback = outbound_phy_packet_callback;
        e->phy_packet.closure = a->closure;
        e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}

static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}

void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL) {
                        fw_notify("Out of memory when allocating event\n");
                        break;
                }
                e->phy_packet.closure = client->phy_receiver_closure;
                e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode = RCODE_COMPLETE;
                e->phy_packet.length = 8;
                e->phy_packet.data[0] = p->header[1];
                e->phy_packet.data[1] = p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}

static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
        [0x17] = ioctl_set_iso_channels,
};

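/*
 * Usage sketch (illustrative, not part of the driver): the table index
 * above is the _IOC_NR() of the command number, so e.g.
 * FW_CDEV_IOC_GET_INFO = _IOWR('#', 0x00, struct fw_cdev_get_info)
 * dispatches to ioctl_get_info. A minimal userspace caller:
 *
 *	int fd = open("/dev/fw0", O_RDWR);	// device file name varies
 *	struct fw_cdev_get_info info = {};
 *	info.version = 4;	// ABI version the client implements
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
 *		perror("FW_CDEV_IOC_GET_INFO");
 *	// on success, info.version, info.card, and info.rom_length now
 *	// hold the kernel's answers
 */
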
static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;

        if (_IOC_DIR(cmd) == _IOC_READ)
                memset(&buffer, 0, _IOC_SIZE(cmd));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}

static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif

static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        ret = fw_iso_buffer_init(&client->buffer, client->device->card,
                                 page_count, direction);
        if (ret < 0)
                return ret;

        ret = fw_iso_buffer_map(&client->buffer, vma);
        if (ret < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return ret;
}

static int is_outbound_transaction_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;

        return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
        int ret;

        spin_lock_irq(&client->lock);
        ret = idr_for_each(&client->resource_idr,
                           is_outbound_transaction_resource, NULL);
        spin_unlock_irq(&client->lock);

        return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}

const struct file_operations fw_device_ops = {
        .owner		= THIS_MODULE,
        .llseek		= no_llseek,
        .open		= fw_device_op_open,
        .read		= fw_device_op_read,
        .unlocked_ioctl	= fw_device_op_ioctl,
        .mmap		= fw_device_op_mmap,
        .release	= fw_device_op_release,
        .poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};