--- /dev/null
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * IPX proc routines
+ *
+ * Copyright(C) Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2002
+ */
+
+ #include <linux/init.h>
+ #ifdef CONFIG_PROC_FS
+ #include <linux/proc_fs.h>
+ #include <linux/spinlock.h>
+ #include <linux/seq_file.h>
+ #include <linux/export.h>
+ #include <net/net_namespace.h>
+ #include <net/tcp_states.h>
+ #include <net/ipx.h>
+
+ static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
+ {
+ spin_lock_bh(&ipx_interfaces_lock);
+ return seq_list_start_head(&ipx_interfaces, *pos);
+ }
+
+ static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+ return seq_list_next(v, &ipx_interfaces, pos);
+ }
+
+ static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
+ {
+ spin_unlock_bh(&ipx_interfaces_lock);
+ }
+
+ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
+ {
+ struct ipx_interface *i;
+
+ if (v == &ipx_interfaces) {
+ seq_puts(seq, "Network Node_Address Primary Device "
+ "Frame_Type");
+ #ifdef IPX_REFCNT_DEBUG
+ seq_puts(seq, " refcnt");
+ #endif
+ seq_puts(seq, "\n");
+ goto out;
+ }
+
+ i = list_entry(v, struct ipx_interface, node);
+ seq_printf(seq, "%08X ", ntohl(i->if_netnum));
+ seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
+ i->if_node[0], i->if_node[1], i->if_node[2],
+ i->if_node[3], i->if_node[4], i->if_node[5]);
+ seq_printf(seq, "%-9s", i == ipx_primary_net ? "Yes" : "No");
+ seq_printf(seq, "%-11s", ipx_device_name(i));
+ seq_printf(seq, "%-9s", ipx_frame_name(i->if_dlink_type));
+ #ifdef IPX_REFCNT_DEBUG
+ seq_printf(seq, "%6d", refcount_read(&i->refcnt));
+ #endif
+ seq_puts(seq, "\n");
+ out:
+ return 0;
+ }
+
+ static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
+ {
+ read_lock_bh(&ipx_routes_lock);
+ return seq_list_start_head(&ipx_routes, *pos);
+ }
+
+ static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+ return seq_list_next(v, &ipx_routes, pos);
+ }
+
+ static void ipx_seq_route_stop(struct seq_file *seq, void *v)
+ {
+ read_unlock_bh(&ipx_routes_lock);
+ }
+
+ static int ipx_seq_route_show(struct seq_file *seq, void *v)
+ {
+ struct ipx_route *rt;
+
+ if (v == &ipx_routes) {
+ seq_puts(seq, "Network Router_Net Router_Node\n");
+ goto out;
+ }
+
+ rt = list_entry(v, struct ipx_route, node);
+
+ seq_printf(seq, "%08X ", ntohl(rt->ir_net));
+ if (rt->ir_routed)
+ seq_printf(seq, "%08X %02X%02X%02X%02X%02X%02X\n",
+ ntohl(rt->ir_intrfc->if_netnum),
+ rt->ir_router_node[0], rt->ir_router_node[1],
+ rt->ir_router_node[2], rt->ir_router_node[3],
+ rt->ir_router_node[4], rt->ir_router_node[5]);
+ else
+ seq_puts(seq, "Directly Connected\n");
+ out:
+ return 0;
+ }
+
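+ /*
+ * Find the socket at offset @pos across the per-interface socket
+ * lists. Called with ipx_interfaces_lock held by the seq_file
+ * ->start() callback.
+ */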
+ static __inline__ struct sock *ipx_get_socket_idx(loff_t pos)
+ {
+ struct sock *s = NULL;
+ struct ipx_interface *i;
+
+ list_for_each_entry(i, &ipx_interfaces, node) {
+ spin_lock_bh(&i->if_sklist_lock);
+ sk_for_each(s, &i->if_sklist) {
+ if (!pos)
+ break;
+ --pos;
+ }
+ spin_unlock_bh(&i->if_sklist_lock);
+ if (!pos) {
+ if (s)
+ goto found;
+ break;
+ }
+ }
+ s = NULL;
+ found:
+ return s;
+ }
+
+ static void *ipx_seq_socket_start(struct seq_file *seq, loff_t *pos)
+ {
+ loff_t l = *pos;
+
+ spin_lock_bh(&ipx_interfaces_lock);
+ return l ? ipx_get_socket_idx(--l) : SEQ_START_TOKEN;
+ }
+
+ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+ struct sock* sk, *next;
+ struct ipx_interface *i;
+ struct ipx_sock *ipxs;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN) {
+ sk = NULL;
+ i = ipx_interfaces_head();
+ if (!i)
+ goto out;
+ sk = sk_head(&i->if_sklist);
+ if (sk)
+ spin_lock_bh(&i->if_sklist_lock);
+ goto out;
+ }
+ sk = v;
+ next = sk_next(sk);
+ if (next) {
+ sk = next;
+ goto out;
+ }
+ ipxs = ipx_sk(sk);
+ i = ipxs->intrfc;
+ spin_unlock_bh(&i->if_sklist_lock);
+ sk = NULL;
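+ /* current list exhausted: advance to the next interface with a
+ * non-empty socket list, leaving its if_sklist_lock held */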
+ for (;;) {
+ if (i->node.next == &ipx_interfaces)
+ break;
+ i = list_entry(i->node.next, struct ipx_interface, node);
+ spin_lock_bh(&i->if_sklist_lock);
+ if (!hlist_empty(&i->if_sklist)) {
+ sk = sk_head(&i->if_sklist);
+ break;
+ }
+ spin_unlock_bh(&i->if_sklist_lock);
+ }
+ out:
+ return sk;
+ }
+
+ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
+ {
+ struct sock *s;
+ struct ipx_sock *ipxs;
+
+ if (v == SEQ_START_TOKEN) {
+ #ifdef CONFIG_IPX_INTERN
+ seq_puts(seq, "Local_Address "
+ "Remote_Address Tx_Queue "
+ "Rx_Queue State Uid\n");
+ #else
+ seq_puts(seq, "Local_Address Remote_Address "
+ "Tx_Queue Rx_Queue State Uid\n");
+ #endif
+ goto out;
+ }
+
+ s = v;
+ ipxs = ipx_sk(s);
+ #ifdef CONFIG_IPX_INTERN
+ seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ",
+ ntohl(ipxs->intrfc->if_netnum),
+ ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3],
+ ipxs->node[4], ipxs->node[5], ntohs(ipxs->port));
+ #else
+ seq_printf(seq, "%08X:%04X ", ntohl(ipxs->intrfc->if_netnum),
+ ntohs(ipxs->port));
+ #endif /* CONFIG_IPX_INTERN */
+ if (s->sk_state != TCP_ESTABLISHED)
+ seq_printf(seq, "%-28s", "Not_Connected");
+ else {
+ seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ",
+ ntohl(ipxs->dest_addr.net),
+ ipxs->dest_addr.node[0], ipxs->dest_addr.node[1],
+ ipxs->dest_addr.node[2], ipxs->dest_addr.node[3],
+ ipxs->dest_addr.node[4], ipxs->dest_addr.node[5],
+ ntohs(ipxs->dest_addr.sock));
+ }
+
+ seq_printf(seq, "%08X %08X %02X %03u\n",
+ sk_wmem_alloc_get(s),
+ sk_rmem_alloc_get(s),
+ s->sk_state,
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
+ out:
+ return 0;
+ }
+
+ static const struct seq_operations ipx_seq_interface_ops = {
+ .start = ipx_seq_interface_start,
+ .next = ipx_seq_interface_next,
+ .stop = ipx_seq_interface_stop,
+ .show = ipx_seq_interface_show,
+ };
+
+ static const struct seq_operations ipx_seq_route_ops = {
+ .start = ipx_seq_route_start,
+ .next = ipx_seq_route_next,
+ .stop = ipx_seq_route_stop,
+ .show = ipx_seq_route_show,
+ };
+
+ static const struct seq_operations ipx_seq_socket_ops = {
+ .start = ipx_seq_socket_start,
+ .next = ipx_seq_socket_next,
+ .stop = ipx_seq_interface_stop,
+ .show = ipx_seq_socket_show,
+ };
+
+ static int ipx_seq_route_open(struct inode *inode, struct file *file)
+ {
+ return seq_open(file, &ipx_seq_route_ops);
+ }
+
+ static int ipx_seq_interface_open(struct inode *inode, struct file *file)
+ {
+ return seq_open(file, &ipx_seq_interface_ops);
+ }
+
+ static int ipx_seq_socket_open(struct inode *inode, struct file *file)
+ {
+ return seq_open(file, &ipx_seq_socket_ops);
+ }
+
+ static const struct file_operations ipx_seq_interface_fops = {
+ .open = ipx_seq_interface_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
+
+ static const struct file_operations ipx_seq_route_fops = {
+ .open = ipx_seq_route_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
+
+ static const struct file_operations ipx_seq_socket_fops = {
+ .open = ipx_seq_socket_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
+
+ static struct proc_dir_entry *ipx_proc_dir;
+
+ int __init ipx_proc_init(void)
+ {
+ struct proc_dir_entry *p;
+ int rc = -ENOMEM;
+
+ ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
+
+ if (!ipx_proc_dir)
+ goto out;
+ p = proc_create("interface", S_IRUGO,
+ ipx_proc_dir, &ipx_seq_interface_fops);
+ if (!p)
+ goto out_interface;
+
+ p = proc_create("route", S_IRUGO, ipx_proc_dir, &ipx_seq_route_fops);
+ if (!p)
+ goto out_route;
+
+ p = proc_create("socket", S_IRUGO, ipx_proc_dir, &ipx_seq_socket_fops);
+ if (!p)
+ goto out_socket;
+
+ rc = 0;
+ out:
+ return rc;
+ out_socket:
+ remove_proc_entry("route", ipx_proc_dir);
+ out_route:
+ remove_proc_entry("interface", ipx_proc_dir);
+ out_interface:
+ remove_proc_entry("ipx", init_net.proc_net);
+ goto out;
+ }
+
+ void __exit ipx_proc_exit(void)
+ {
+ remove_proc_entry("interface", ipx_proc_dir);
+ remove_proc_entry("route", ipx_proc_dir);
+ remove_proc_entry("socket", ipx_proc_dir);
+ remove_proc_entry("ipx", init_net.proc_net);
+ }
+
+ #else /* CONFIG_PROC_FS */
+
+ int __init ipx_proc_init(void)
+ {
+ return 0;
+ }
+
+ void __exit ipx_proc_exit(void)
+ {
+ }
+
+ #endif /* CONFIG_PROC_FS */
--- /dev/null
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * cdev.c - Character device component for Mostcore
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/module.h>
+ #include <linux/sched.h>
+ #include <linux/fs.h>
+ #include <linux/slab.h>
+ #include <linux/device.h>
+ #include <linux/cdev.h>
+ #include <linux/poll.h>
+ #include <linux/kfifo.h>
+ #include <linux/uaccess.h>
+ #include <linux/idr.h>
+ #include "most/core.h"
+
+ static struct cdev_component {
+ dev_t devno;
+ struct ida minor_id;
+ unsigned int major;
+ struct class *class;
+ struct core_component cc;
+ } comp;
+
+ struct comp_channel {
+ wait_queue_head_t wq;
+ spinlock_t unlink; /* synchronization lock to unlink channels */
+ struct cdev cdev;
+ struct device *dev;
+ struct mutex io_mutex;
+ struct most_interface *iface;
+ struct most_channel_config *cfg;
+ unsigned int channel_id;
+ dev_t devno;
+ size_t mbo_offs;
+ DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
+ int access_ref;
+ struct list_head list;
+ };
+
+ #define to_channel(d) container_of(d, struct comp_channel, cdev)
+ static struct list_head channel_list;
+ static spinlock_t ch_list_lock;
+
+ static inline bool ch_has_mbo(struct comp_channel *c)
+ {
+ return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
+ }
+
+ static inline bool ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
+ {
+ if (!kfifo_peek(&c->fifo, mbo)) {
+ *mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
+ if (*mbo)
+ kfifo_in(&c->fifo, mbo, 1);
+ }
+ return *mbo;
+ }
+
+ static struct comp_channel *get_channel(struct most_interface *iface, int id)
+ {
+ struct comp_channel *c, *tmp;
+ unsigned long flags;
+ int found_channel = 0;
+
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ if ((c->iface == iface) && (c->channel_id == id)) {
+ found_channel = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ if (!found_channel)
+ return NULL;
+ return c;
+ }
+
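+ /* Return all MBOs still queued in the fifo to the core before
+ * stopping the channel. */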
+ static void stop_channel(struct comp_channel *c)
+ {
+ struct mbo *mbo;
+
+ while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
+ most_put_mbo(mbo);
+ most_stop_channel(c->iface, c->channel_id, &comp.cc);
+ }
+
+ static void destroy_cdev(struct comp_channel *c)
+ {
+ unsigned long flags;
+
+ device_destroy(comp.class, c->devno);
+ cdev_del(&c->cdev);
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_del(&c->list);
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ }
+
+ static void destroy_channel(struct comp_channel *c)
+ {
+ ida_simple_remove(&comp.minor_id, MINOR(c->devno));
+ kfifo_free(&c->fifo);
+ kfree(c);
+ }
+
+ /**
+ * comp_open - implements the syscall to open the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stores the channel pointer in the private data field of
+ * the file structure and activates the channel within the core.
+ */
+ static int comp_open(struct inode *inode, struct file *filp)
+ {
+ struct comp_channel *c;
+ int ret;
+
+ c = to_channel(inode->i_cdev);
+ filp->private_data = c;
+
+ if (((c->cfg->direction == MOST_CH_RX) &&
+ ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
+ ((c->cfg->direction == MOST_CH_TX) &&
+ ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
+ pr_info("WARN: Access flags mismatch\n");
+ return -EACCES;
+ }
+
+ mutex_lock(&c->io_mutex);
+ if (!c->dev) {
+ pr_info("WARN: Device is destroyed\n");
+ mutex_unlock(&c->io_mutex);
+ return -ENODEV;
+ }
+
+ if (c->access_ref) {
+ pr_info("WARN: Device is busy\n");
+ mutex_unlock(&c->io_mutex);
+ return -EBUSY;
+ }
+
+ c->mbo_offs = 0;
+ ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
+ if (!ret)
+ c->access_ref = 1;
+ mutex_unlock(&c->io_mutex);
+ return ret;
+ }
+
+ /**
+ * comp_close - implements the syscall to close the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stops the channel within the core.
+ */
+ static int comp_close(struct inode *inode, struct file *filp)
+ {
+ struct comp_channel *c = to_channel(inode->i_cdev);
+
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->access_ref = 0;
+ spin_unlock(&c->unlink);
+ if (c->dev) {
+ stop_channel(c);
+ mutex_unlock(&c->io_mutex);
+ } else {
+ mutex_unlock(&c->io_mutex);
+ destroy_channel(c);
+ }
+ return 0;
+ }
+
+ /**
+ * comp_write - implements the syscall to write to the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to write
+ * @offset: offset from where to start writing
+ */
+ static ssize_t comp_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
+ {
+ int ret;
+ size_t to_copy, left;
+ struct mbo *mbo = NULL;
+ struct comp_channel *c = filp->private_data;
+
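+ /* Wait for a free MBO unless O_NONBLOCK is set; io_mutex is
+ * dropped while sleeping so completion handlers can make progress,
+ * and the device may vanish meanwhile (rechecked below). */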
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !ch_get_mbo(c, &mbo)) {
+ mutex_unlock(&c->io_mutex);
+
+ if ((filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+ if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
+ return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
+ }
+
+ if (unlikely(!c->dev)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
+ left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
+ if (left == to_copy) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ c->mbo_offs += to_copy - left;
+ if (c->mbo_offs >= c->cfg->buffer_size ||
+ c->cfg->data_type == MOST_CH_CONTROL ||
+ c->cfg->data_type == MOST_CH_ASYNC) {
+ kfifo_skip(&c->fifo);
+ mbo->buffer_length = c->mbo_offs;
+ c->mbo_offs = 0;
+ most_submit_mbo(mbo);
+ }
+
+ ret = to_copy - left;
+ unlock:
+ mutex_unlock(&c->io_mutex);
+ return ret;
+ }
+
+ /**
+ * comp_read - implements the syscall to read from the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to read
+ * @offset: offset from where to start reading
+ */
+ static ssize_t
+ comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
+ {
+ size_t to_copy, not_copied, copied;
+ struct mbo *mbo;
+ struct comp_channel *c = filp->private_data;
+
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
+ mutex_unlock(&c->io_mutex);
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ if (wait_event_interruptible(c->wq,
+ (!kfifo_is_empty(&c->fifo) ||
+ (!c->dev))))
+ return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
+ }
+
+ /* make sure we don't submit to gone devices */
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
+ return -ENODEV;
+ }
+
+ to_copy = min_t(size_t,
+ count,
+ mbo->processed_length - c->mbo_offs);
+
+ not_copied = copy_to_user(buf,
+ mbo->virt_address + c->mbo_offs,
+ to_copy);
+
+ copied = to_copy - not_copied;
+
+ c->mbo_offs += copied;
+ if (c->mbo_offs >= mbo->processed_length) {
+ kfifo_skip(&c->fifo);
+ most_put_mbo(mbo);
+ c->mbo_offs = 0;
+ }
+ mutex_unlock(&c->io_mutex);
+ return copied;
+ }
+
+ static __poll_t comp_poll(struct file *filp, poll_table *wait)
+ {
+ struct comp_channel *c = filp->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(filp, &c->wq, wait);
+
+ if (c->cfg->direction == MOST_CH_RX) {
+ if (!kfifo_is_empty(&c->fifo))
+ mask |= POLLIN | POLLRDNORM;
+ } else {
+ if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
+ mask |= POLLOUT | POLLWRNORM;
+ }
+ return mask;
+ }
+
+ /**
+ * Initialization of struct file_operations
+ */
+ static const struct file_operations channel_fops = {
+ .owner = THIS_MODULE,
+ .read = comp_read,
+ .write = comp_write,
+ .open = comp_open,
+ .release = comp_close,
+ .poll = comp_poll,
+ };
+
+ /**
+ * comp_disconnect_channel - disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the cdev that represents this
+ * channel in user space.
+ */
+ static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
+ {
+ struct comp_channel *c;
+
+ if (!iface) {
+ pr_info("Bad interface pointer\n");
+ return -EINVAL;
+ }
+
+ c = get_channel(iface, channel_id);
+ if (!c)
+ return -ENXIO;
+
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->dev = NULL;
+ spin_unlock(&c->unlink);
+ destroy_cdev(c);
+ if (c->access_ref) {
+ stop_channel(c);
+ wake_up_interruptible(&c->wq);
+ mutex_unlock(&c->io_mutex);
+ } else {
+ mutex_unlock(&c->io_mutex);
+ destroy_channel(c);
+ }
+ return 0;
+ }
+
+ /**
+ * comp_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel linked to this MBO and stores it in the local
+ * fifo buffer.
+ */
+ static int comp_rx_completion(struct mbo *mbo)
+ {
+ struct comp_channel *c;
+
+ if (!mbo)
+ return -EINVAL;
+
+ c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ if (!c)
+ return -ENXIO;
+
+ spin_lock(&c->unlink);
+ if (!c->access_ref || !c->dev) {
+ spin_unlock(&c->unlink);
+ return -ENODEV;
+ }
+ kfifo_in(&c->fifo, &mbo, 1);
+ spin_unlock(&c->unlink);
+ #ifdef DEBUG_MESG
+ if (kfifo_is_full(&c->fifo))
+ pr_info("WARN: Fifo is full\n");
+ #endif
+ wake_up_interruptible(&c->wq);
+ return 0;
+ }
+
+ /**
+ * comp_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This wakes sleeping processes in the wait-queue.
+ */
+ static int comp_tx_completion(struct most_interface *iface, int channel_id)
+ {
+ struct comp_channel *c;
+
+ if (!iface) {
+ pr_info("Bad interface pointer\n");
+ return -EINVAL;
+ }
+ if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
+ pr_info("Channel ID out of range\n");
+ return -EINVAL;
+ }
+
+ c = get_channel(iface, channel_id);
+ if (!c)
+ return -ENXIO;
+ wake_up_interruptible(&c->wq);
+ return 0;
+ }
+
+ /**
+ * comp_probe - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @name: name of the device to be created
+ *
+ * This allocates a channel object and creates the device node in /dev.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+ static int comp_probe(struct most_interface *iface, int channel_id,
+ struct most_channel_config *cfg, char *name)
+ {
+ struct comp_channel *c;
+ unsigned long cl_flags;
+ int retval;
+ int current_minor;
+
+ if ((!iface) || (!cfg) || (!name)) {
+ pr_info("Probing component with bad arguments");
+ return -EINVAL;
+ }
+ c = get_channel(iface, channel_id);
+ if (c)
+ return -EEXIST;
+
+ current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
+ if (current_minor < 0)
+ return current_minor;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ retval = -ENOMEM;
+ goto error_alloc_channel;
+ }
+
+ c->devno = MKDEV(comp.major, current_minor);
+ cdev_init(&c->cdev, &channel_fops);
+ c->cdev.owner = THIS_MODULE;
+ cdev_add(&c->cdev, c->devno, 1);
+ c->iface = iface;
+ c->cfg = cfg;
+ c->channel_id = channel_id;
+ c->access_ref = 0;
+ spin_lock_init(&c->unlink);
+ INIT_KFIFO(c->fifo);
+ retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
+ if (retval) {
+ pr_info("failed to alloc channel kfifo");
+ goto error_alloc_kfifo;
+ }
+ init_waitqueue_head(&c->wq);
+ mutex_init(&c->io_mutex);
+ spin_lock_irqsave(&ch_list_lock, cl_flags);
+ list_add_tail(&c->list, &channel_list);
+ spin_unlock_irqrestore(&ch_list_lock, cl_flags);
+ c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);
+
+ if (IS_ERR(c->dev)) {
+ retval = PTR_ERR(c->dev);
+ pr_info("failed to create new device node %s\n", name);
+ goto error_create_device;
+ }
+ kobject_uevent(&c->dev->kobj, KOBJ_ADD);
+ return 0;
+
+ error_create_device:
+ kfifo_free(&c->fifo);
+ list_del(&c->list);
+ error_alloc_kfifo:
+ cdev_del(&c->cdev);
+ kfree(c);
+ error_alloc_channel:
+ ida_simple_remove(&comp.minor_id, current_minor);
+ return retval;
+ }
+
+ static struct cdev_component comp = {
+ .cc = {
+ .name = "cdev",
+ .probe_channel = comp_probe,
+ .disconnect_channel = comp_disconnect_channel,
+ .rx_completion = comp_rx_completion,
+ .tx_completion = comp_tx_completion,
+ },
+ };
+
+ static int __init mod_init(void)
+ {
+ int err;
+
+ pr_info("init()\n");
+
+ comp.class = class_create(THIS_MODULE, "most_cdev");
+ if (IS_ERR(comp.class)) {
+ pr_info("No udev support.\n");
+ return PTR_ERR(comp.class);
+ }
+
+ INIT_LIST_HEAD(&channel_list);
+ spin_lock_init(&ch_list_lock);
+ ida_init(&comp.minor_id);
+
+ err = alloc_chrdev_region(&comp.devno, 0, 50, "cdev");
+ if (err < 0)
+ goto dest_ida;
+ comp.major = MAJOR(comp.devno);
+ err = most_register_component(&comp.cc);
+ if (err)
+ goto free_cdev;
+ return 0;
+
+ free_cdev:
+ unregister_chrdev_region(comp.devno, 50);
+ dest_ida:
+ ida_destroy(&comp.minor_id);
+ class_destroy(comp.class);
+ return err;
+ }
+
+ static void __exit mod_exit(void)
+ {
+ struct comp_channel *c, *tmp;
+
+ pr_info("exit module\n");
+
+ most_deregister_component(&comp.cc);
+
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ destroy_cdev(c);
+ destroy_channel(c);
+ }
+ unregister_chrdev_region(comp.devno, 50);
+ ida_destroy(&comp.minor_id);
+ class_destroy(comp.class);
+ }
+
+ module_init(mod_init);
+ module_exit(mod_exit);
+ MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("character device component for mostcore");
--- /dev/null
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * video.c - V4L2 component for Mostcore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/device.h>
+ #include <linux/suspend.h>
+ #include <linux/videodev2.h>
+ #include <linux/mutex.h>
+ #include <media/v4l2-common.h>
+ #include <media/v4l2-ioctl.h>
+ #include <media/v4l2-event.h>
+ #include <media/v4l2-device.h>
+ #include <media/v4l2-ctrls.h>
+ #include <media/v4l2-fh.h>
+
+ #include "most/core.h"
+
+ #define V4L2_CMP_MAX_INPUT 1
+
+ static struct core_component comp;
+
+ struct most_video_dev {
+ struct most_interface *iface;
+ int ch_idx;
+ struct list_head list;
+ bool mute;
+
+ struct list_head pending_mbos;
+ spinlock_t list_lock;
+
+ struct v4l2_device v4l2_dev;
+ atomic_t access_ref;
+ struct video_device *vdev;
+ unsigned int ctrl_input;
+
+ struct mutex lock;
+
+ wait_queue_head_t wait_data;
+ };
+
+ struct comp_fh {
+ /* must be the first field of this struct! */
+ struct v4l2_fh fh;
+ struct most_video_dev *mdev;
+ u32 offs;
+ };
+
+ static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
+ static struct spinlock list_lock;
+
+ static inline bool data_ready(struct most_video_dev *mdev)
+ {
+ return !list_empty(&mdev->pending_mbos);
+ }
+
+ static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
+ {
+ return list_first_entry(&mdev->pending_mbos, struct mbo, list);
+ }
+
+ static int comp_vdev_open(struct file *filp)
+ {
+ int ret;
+ struct video_device *vdev = video_devdata(filp);
+ struct most_video_dev *mdev = video_drvdata(filp);
+ struct comp_fh *fh;
+
+ v4l2_info(&mdev->v4l2_dev, "comp_vdev_open()\n");
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+
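+ /* access_ref starts at -1, so inc-and-test succeeds only for the
+ * first opener: the device is exclusive-open */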
+ if (!atomic_inc_and_test(&mdev->access_ref)) {
+ v4l2_err(&mdev->v4l2_dev, "too many clients\n");
+ ret = -EBUSY;
+ goto err_dec;
+ }
+
+ fh->mdev = mdev;
+ v4l2_fh_init(&fh->fh, vdev);
+ filp->private_data = fh;
+
+ v4l2_fh_add(&fh->fh);
+
+ ret = most_start_channel(mdev->iface, mdev->ch_idx, &comp);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
+ goto err_rm;
+ }
+
+ return 0;
+
+ err_rm:
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+
+ err_dec:
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return ret;
+ }
+
+ static int comp_vdev_close(struct file *filp)
+ {
+ struct comp_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ struct mbo *mbo, *tmp;
+
+ v4l2_info(&mdev->v4l2_dev, "comp_vdev_close()\n");
+
+ /*
+ * We need to put MBOs back before we call most_stop_channel()
+ * to deallocate MBOs.
+ * On the other hand, mostcore keeps calling rx_completion()
+ * to deliver MBOs until most_stop_channel() is called.
+ * Use mute to work around this issue.
+ * This must be implemented in core.
+ */
+
+ spin_lock_irq(&mdev->list_lock);
+ mdev->mute = true;
+ list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
+ list_del(&mbo->list);
+ spin_unlock_irq(&mdev->list_lock);
+ most_put_mbo(mbo);
+ spin_lock_irq(&mdev->list_lock);
+ }
+ spin_unlock_irq(&mdev->list_lock);
+ most_stop_channel(mdev->iface, mdev->ch_idx, &comp);
+ mdev->mute = false;
+
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
+
+ atomic_dec(&mdev->access_ref);
+ kfree(fh);
+ return 0;
+ }
+
+ static ssize_t comp_vdev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+ {
+ struct comp_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ int ret = 0;
+
+ if (*pos)
+ return -ESPIPE;
+
+ if (!mdev)
+ return -ENODEV;
+
+ /* wait for the first buffer */
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
+ return -ERESTARTSYS;
+ }
+
+ if (!data_ready(mdev))
+ return -EAGAIN;
+
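+ /* drain completed MBOs into the user buffer; a partially read MBO
+ * keeps its offset in fh->offs for the next read() call */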
+ while (count > 0 && data_ready(mdev)) {
+ struct mbo *const mbo = get_top_mbo(mdev);
+ int const rem = mbo->processed_length - fh->offs;
+ int const cnt = rem < count ? rem : count;
+
+ if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
+ v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
+ if (!ret)
+ ret = -EFAULT;
+ return ret;
+ }
+
+ fh->offs += cnt;
+ count -= cnt;
+ buf += cnt;
+ ret += cnt;
+
+ if (cnt >= rem) {
+ fh->offs = 0;
+ spin_lock_irq(&mdev->list_lock);
+ list_del(&mbo->list);
+ spin_unlock_irq(&mdev->list_lock);
+ most_put_mbo(mbo);
+ }
+ }
+ return ret;
+ }
+
+ static __poll_t comp_vdev_poll(struct file *filp, poll_table *wait)
+ {
+ struct comp_fh *fh = filp->private_data;
+ struct most_video_dev *mdev = fh->mdev;
+ __poll_t mask = 0;
+
+ /* only wait if no data is available */
+ if (!data_ready(mdev))
+ poll_wait(filp, &mdev->wait_data, wait);
+ if (data_ready(mdev))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+ }
+
+ static void comp_set_format_struct(struct v4l2_format *f)
+ {
+ f->fmt.pix.width = 8;
+ f->fmt.pix.height = 8;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = 188 * 2;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.priv = 0;
+ }
+
+ static int comp_set_format(struct most_video_dev *mdev, unsigned int cmd,
+ struct v4l2_format *format)
+ {
+ if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
+ return -EINVAL;
+
+ if (cmd == VIDIOC_TRY_FMT)
+ return 0;
+
+ comp_set_format_struct(format);
+
+ return 0;
+ }
+
+ static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
+
+ strlcpy(cap->driver, "v4l2_component", sizeof(cap->driver));
+ strlcpy(cap->card, "MOST", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "%s", mdev->iface->description);
+
+ cap->capabilities =
+ V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER |
+ V4L2_CAP_VIDEO_CAPTURE;
+ return 0;
+ }
+
+ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
+
+ if (f->index)
+ return -EINVAL;
+
+ strcpy(f->description, "MPEG");
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED;
+ f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+ return 0;
+ }
+
+ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
+
+ comp_set_format_struct(f);
+ return 0;
+ }
+
+ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return comp_set_format(mdev, VIDIOC_TRY_FMT, f);
+ }
+
+ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ return comp_set_format(mdev, VIDIOC_S_FMT, f);
+ }
+
+ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
+
+ *norm = V4L2_STD_UNKNOWN;
+ return 0;
+ }
+
+ static int vidioc_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ if (input->index >= V4L2_CMP_MAX_INPUT)
+ return -EINVAL;
+
+ strcpy(input->name, "MOST Video");
+ input->type |= V4L2_INPUT_TYPE_CAMERA;
+ input->audioset = 0;
+
+ input->std = mdev->vdev->tvnorms;
+
+ return 0;
+ }
+
+ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+ *i = mdev->ctrl_input;
+ return 0;
+ }
+
+ static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
+ {
+ struct comp_fh *fh = priv;
+ struct most_video_dev *mdev = fh->mdev;
+
+ v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
+
+ if (index >= V4L2_CMP_MAX_INPUT)
+ return -EINVAL;
+ mdev->ctrl_input = index;
+ return 0;
+ }
+
+ static const struct v4l2_file_operations comp_fops = {
+ .owner = THIS_MODULE,
+ .open = comp_vdev_open,
+ .release = comp_vdev_close,
+ .read = comp_vdev_read,
+ .poll = comp_vdev_poll,
+ .unlocked_ioctl = video_ioctl2,
+ };
+
+ static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ };
+
+ static const struct video_device comp_videodev_template = {
+ .fops = &comp_fops,
+ .release = video_device_release,
+ .ioctl_ops = &video_ioctl_ops,
+ .tvnorms = V4L2_STD_UNKNOWN,
+ };
+
+ /**************************************************************************/
+
+ static struct most_video_dev *get_comp_dev(
+ struct most_interface *iface, int channel_idx)
+ {
+ struct most_video_dev *mdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ list_for_each_entry(mdev, &video_devices, list) {
+ if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ return mdev;
+ }
+ }
+ spin_unlock_irqrestore(&list_lock, flags);
+ return NULL;
+ }
+
+ static int comp_rx_data(struct mbo *mbo)
+ {
+ unsigned long flags;
+ struct most_video_dev *mdev =
+ get_comp_dev(mbo->ifp, mbo->hdm_channel_id);
+
+ if (!mdev)
+ return -EIO;
+
+ spin_lock_irqsave(&mdev->list_lock, flags);
+ if (unlikely(mdev->mute)) {
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
+ return -EIO;
+ }
+
+ list_add_tail(&mbo->list, &mdev->pending_mbos);
+ spin_unlock_irqrestore(&mdev->list_lock, flags);
+ wake_up_interruptible(&mdev->wait_data);
+ return 0;
+ }
+
+ static int comp_register_videodev(struct most_video_dev *mdev)
+ {
+ int ret;
+
+ v4l2_info(&mdev->v4l2_dev, "comp_register_videodev()\n");
+
+ init_waitqueue_head(&mdev->wait_data);
+
+ /* allocate and fill v4l2 video struct */
+ mdev->vdev = video_device_alloc();
+ if (!mdev->vdev)
+ return -ENOMEM;
+
+ /* Fill the video capture device struct */
+ *mdev->vdev = comp_videodev_template;
+ mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
+ mdev->vdev->lock = &mdev->lock;
+ snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
+ mdev->v4l2_dev.name);
+
+ /* Register the v4l2 device */
+ video_set_drvdata(mdev->vdev, mdev);
+ ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
+ ret);
+ video_device_release(mdev->vdev);
+ }
+
+ return ret;
+ }
+
+ static void comp_unregister_videodev(struct most_video_dev *mdev)
+ {
+ v4l2_info(&mdev->v4l2_dev, "comp_unregister_videodev()\n");
+
+ video_unregister_device(mdev->vdev);
+ }
+
+ static void comp_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+ {
+ struct most_video_dev *mdev =
+ container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
+
+ v4l2_device_unregister(v4l2_dev);
+ kfree(mdev);
+ }
+
+ static int comp_probe_channel(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *ccfg, char *name)
+ {
+ int ret;
+ struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
+
+ pr_info("comp_probe_channel(%s)\n", name);
+
+ if (mdev) {
+ pr_err("channel already linked\n");
+ return -EEXIST;
+ }
+
+ if (ccfg->direction != MOST_CH_RX) {
+ pr_err("wrong direction, expect rx\n");
+ return -EINVAL;
+ }
+
+ if (ccfg->data_type != MOST_CH_SYNC &&
+ ccfg->data_type != MOST_CH_ISOC) {
+ pr_err("wrong channel type, expect sync or isoc\n");
+ return -EINVAL;
+ }
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mutex_init(&mdev->lock);
+ atomic_set(&mdev->access_ref, -1);
+ spin_lock_init(&mdev->list_lock);
+ INIT_LIST_HEAD(&mdev->pending_mbos);
+ mdev->iface = iface;
+ mdev->ch_idx = channel_idx;
+ mdev->v4l2_dev.release = comp_v4l2_dev_release;
+
+ /* Create the v4l2_device */
+ strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
+ ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
+ if (ret) {
+ pr_err("v4l2_device_register() failed\n");
+ kfree(mdev);
+ return ret;
+ }
+
+ ret = comp_register_videodev(mdev);
+ if (ret)
+ goto err_unreg;
+
+ spin_lock_irq(&list_lock);
+ list_add(&mdev->list, &video_devices);
+ spin_unlock_irq(&list_lock);
+ v4l2_info(&mdev->v4l2_dev, "comp_probe_channel() done\n");
+ return 0;
+
+ err_unreg:
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return ret;
+ }
+
+ static int comp_disconnect_channel(struct most_interface *iface,
+ int channel_idx)
+ {
+ struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
+
+ if (!mdev) {
+ pr_err("no such channel is linked\n");
+ return -ENOENT;
+ }
+
+ v4l2_info(&mdev->v4l2_dev, "comp_disconnect_channel()\n");
+
+ spin_lock_irq(&list_lock);
+ list_del(&mdev->list);
+ spin_unlock_irq(&list_lock);
+
+ comp_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ return 0;
+ }
+
+ static struct core_component comp = {
+ .name = "video",
+ .probe_channel = comp_probe_channel,
+ .disconnect_channel = comp_disconnect_channel,
+ .rx_completion = comp_rx_data,
+ };
+
+ static int __init comp_init(void)
+ {
+ spin_lock_init(&list_lock);
+ return most_register_component(&comp);
+ }
+
+ static void __exit comp_exit(void)
+ {
+ struct most_video_dev *mdev, *tmp;
+
+ /*
+ * As mostcore currently doesn't call disconnect_channel()
+ * for linked channels when most_deregister_component() is
+ * called, we simulate this call here.
+ * This must be fixed in core.
+ */
+ spin_lock_irq(&list_lock);
+ list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+ list_del(&mdev->list);
+ spin_unlock_irq(&list_lock);
+
+ comp_unregister_videodev(mdev);
+ v4l2_device_disconnect(&mdev->v4l2_dev);
+ v4l2_device_put(&mdev->v4l2_dev);
+ spin_lock_irq(&list_lock);
+ }
+ spin_unlock_irq(&list_lock);
+
+ most_deregister_component(&comp);
+ BUG_ON(!list_empty(&video_devices));
+ }
+
+ module_init(comp_init);
+ module_exit(comp_exit);
+
+ MODULE_DESCRIPTION("V4L2 Component Module for Mostcore");
+ MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+ MODULE_LICENSE("GPL");
--- /dev/null
+ // SPDX-License-Identifier: GPL-2.0
+ /*
+ * linux/fs/ncpfs/sock.c
+ *
+ * Copyright (C) 1992, 1993 Rick Sladkey
+ *
+ * Modified 1995, 1996 by Volker Lendecke to be usable for ncp
+ * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
+ *
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/time.h>
+ #include <linux/errno.h>
+ #include <linux/socket.h>
+ #include <linux/fcntl.h>
+ #include <linux/stat.h>
+ #include <linux/string.h>
+ #include <linux/sched/signal.h>
+ #include <linux/uaccess.h>
+ #include <linux/in.h>
+ #include <linux/net.h>
+ #include <linux/mm.h>
+ #include <linux/netdevice.h>
+ #include <linux/signal.h>
+ #include <linux/slab.h>
+ #include <net/scm.h>
+ #include <net/sock.h>
+ #include <linux/ipx.h>
+ #include <linux/poll.h>
+ #include <linux/file.h>
+
+ #include "ncp_fs.h"
+
+ #include "ncpsign_kernel.h"
+
+ static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
+ {
+ struct msghdr msg = {NULL, };
+ struct kvec iov = {buf, size};
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, flags);
+ }
+
+ static int _send(struct socket *sock, const void *buff, int len)
+ {
+ struct msghdr msg = { .msg_flags = 0 };
+ struct kvec vec = {.iov_base = (void *)buff, .iov_len = len};
+ iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
+ return sock_sendmsg(sock, &msg);
+ }
+
+ struct ncp_request_reply {
+ struct list_head req;
+ wait_queue_head_t wq;
+ atomic_t refs;
+ unsigned char* reply_buf;
+ size_t datalen;
+ int result;
+ enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
+ struct iov_iter from;
+ struct kvec tx_iov[3];
+ u_int16_t tx_type;
+ u_int32_t sign[6];
+ };
+
+ static inline struct ncp_request_reply* ncp_alloc_req(void)
+ {
+ struct ncp_request_reply *req;
+
+ req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ init_waitqueue_head(&req->wq);
+ atomic_set(&req->refs, 1);
+ req->status = RQ_IDLE;
+
+ return req;
+ }
+
+ static void ncp_req_get(struct ncp_request_reply *req)
+ {
+ atomic_inc(&req->refs);
+ }
+
+ static void ncp_req_put(struct ncp_request_reply *req)
+ {
+ if (atomic_dec_and_test(&req->refs))
+ kfree(req);
+ }
+
+ void ncp_tcp_data_ready(struct sock *sk)
+ {
+ struct ncp_server *server = sk->sk_user_data;
+
+ server->data_ready(sk);
+ schedule_work(&server->rcv.tq);
+ }
+
+ void ncp_tcp_error_report(struct sock *sk)
+ {
+ struct ncp_server *server = sk->sk_user_data;
+
+ server->error_report(sk);
+ schedule_work(&server->rcv.tq);
+ }
+
+ void ncp_tcp_write_space(struct sock *sk)
+ {
+ struct ncp_server *server = sk->sk_user_data;
+
+ /* We do not need any locking: we first set tx.creq, and then we do sendmsg,
+ not vice versa... */
+ server->write_space(sk);
+ if (server->tx.creq)
+ schedule_work(&server->tx.tq);
+ }
+
+ void ncpdgram_timeout_call(struct timer_list *t)
+ {
+ struct ncp_server *server = from_timer(server, t, timeout_tm);
+
+ schedule_work(&server->timeout_tq);
+ }
+
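+ /* Complete @req with @result, copying the reply out of the shared
+ * receive buffer unless the caller has abandoned the request. */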
+ static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
+ {
+ req->result = result;
+ if (req->status != RQ_ABANDONED)
+ memcpy(req->reply_buf, server->rxbuf, req->datalen);
+ req->status = RQ_DONE;
+ wake_up_all(&req->wq);
+ ncp_req_put(req);
+ }
+
+ static void __abort_ncp_connection(struct ncp_server *server)
+ {
+ struct ncp_request_reply *req;
+
+ ncp_invalidate_conn(server);
+ del_timer(&server->timeout_tm);
+ while (!list_empty(&server->tx.requests)) {
+ req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
+
+ list_del_init(&req->req);
+ ncp_finish_request(server, req, -EIO);
+ }
+ req = server->rcv.creq;
+ if (req) {
+ server->rcv.creq = NULL;
+ ncp_finish_request(server, req, -EIO);
+ server->rcv.ptr = NULL;
+ server->rcv.state = 0;
+ }
+ req = server->tx.creq;
+ if (req) {
+ server->tx.creq = NULL;
+ ncp_finish_request(server, req, -EIO);
+ }
+ }
+
+ static inline int get_conn_number(struct ncp_reply_header *rp)
+ {
+ return rp->conn_low | (rp->conn_high << 8);
+ }
+
+ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
+ {
+ /* If req is done, we got a signal, but we also received the answer... */
+ switch (req->status) {
+ case RQ_IDLE:
+ case RQ_DONE:
+ break;
+ case RQ_QUEUED:
+ list_del_init(&req->req);
+ ncp_finish_request(server, req, err);
+ break;
+ case RQ_INPROGRESS:
+ req->status = RQ_ABANDONED;
+ break;
+ case RQ_ABANDONED:
+ break;
+ }
+ }
+
+ static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
+ {
+ mutex_lock(&server->rcv.creq_mutex);
+ __ncp_abort_request(server, req, err);
+ mutex_unlock(&server->rcv.creq_mutex);
+ }
+
+ static inline void __ncptcp_abort(struct ncp_server *server)
+ {
+ __abort_ncp_connection(server);
+ }
+
+ static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
+ {
+ struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT };
+ return sock_sendmsg(sock, &msg);
+ }
+
+ static void __ncptcp_try_send(struct ncp_server *server)
+ {
+ struct ncp_request_reply *rq;
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
+ int result;
+
+ rq = server->tx.creq;
+ if (!rq)
+ return;
+
+ msg.msg_iter = rq->from;
+ result = sock_sendmsg(server->ncp_sock, &msg);
+
+ if (result == -EAGAIN)
+ return;
+
+ if (result < 0) {
+ pr_err("tcp: Send failed: %d\n", result);
+ __ncp_abort_request(server, rq, result);
+ return;
+ }
+ if (!msg_data_left(&msg)) {
+ server->rcv.creq = rq;
+ server->tx.creq = NULL;
+ return;
+ }
+ rq->from = msg.msg_iter;
+ }
+
+ static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
+ {
+ req->status = RQ_INPROGRESS;
+ h->conn_low = server->connection;
+ h->conn_high = server->connection >> 8;
+ h->sequence = ++server->sequence;
+ }
+
+ static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+ {
+ size_t signlen, len = req->tx_iov[1].iov_len;
+ struct ncp_request_header *h = req->tx_iov[1].iov_base;
+
+ ncp_init_header(server, req, h);
+ signlen = sign_packet(server,
+ req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
+ len - sizeof(struct ncp_request_header) + 1,
+ cpu_to_le32(len), req->sign);
+ if (signlen) {
+ /* NCP over UDP appends signature */
+ req->tx_iov[2].iov_base = req->sign;
+ req->tx_iov[2].iov_len = signlen;
+ }
+ iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
+ req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
+ server->rcv.creq = req;
+ server->timeout_last = server->m.time_out;
+ server->timeout_retries = server->m.retry_count;
+ ncpdgram_send(server->ncp_sock, req);
+ mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
+ }
+
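+ /* NCP-over-TCP prepends a 16-byte record header to each request
+ * (transmit magic, total frame length, protocol version and a hint
+ * with the expected reply size), optionally followed by an 8-byte
+ * packet signature; see ncptcp_start_request(). */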
+ #define NCP_TCP_XMIT_MAGIC (0x446D6454)
+ #define NCP_TCP_XMIT_VERSION (1)
+ #define NCP_TCP_RCVD_MAGIC (0x744E6350)
+
+ static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+ {
+ size_t signlen, len = req->tx_iov[1].iov_len;
+ struct ncp_request_header *h = req->tx_iov[1].iov_base;
+
+ ncp_init_header(server, req, h);
+ signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
+ len - sizeof(struct ncp_request_header) + 1,
+ cpu_to_be32(len + 24), req->sign + 4) + 16;
+
+ req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
+ req->sign[1] = htonl(len + signlen);
+ req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
+ req->sign[3] = htonl(req->datalen + 8);
+ /* NCP over TCP prepends signature */
+ req->tx_iov[0].iov_base = req->sign;
+ req->tx_iov[0].iov_len = signlen;
+ iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
+ req->tx_iov, 2, len + signlen);
+
+ server->tx.creq = req;
+ __ncptcp_try_send(server);
+ }
+
+ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
+ {
+ /* we copy the data so that we do not depend on the caller
+ staying alive */
+ memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
+ req->tx_iov[1].iov_base = server->txbuf;
+
+ if (server->ncp_sock->type == SOCK_STREAM)
+ ncptcp_start_request(server, req);
+ else
+ ncpdgram_start_request(server, req);
+ }
+
+ static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
+ {
+ mutex_lock(&server->rcv.creq_mutex);
+ if (!ncp_conn_valid(server)) {
+ mutex_unlock(&server->rcv.creq_mutex);
+ pr_err("tcp: Server died\n");
+ return -EIO;
+ }
+ ncp_req_get(req);
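+ /* busy transmitting or awaiting a reply: queue the request,
+ * otherwise start it immediately */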
+ if (server->tx.creq || server->rcv.creq) {
+ req->status = RQ_QUEUED;
+ list_add_tail(&req->req, &server->tx.requests);
+ mutex_unlock(&server->rcv.creq_mutex);
+ return 0;
+ }
+ __ncp_start_request(server, req);
+ mutex_unlock(&server->rcv.creq_mutex);
+ return 0;
+ }
+
+ static void __ncp_next_request(struct ncp_server *server)
+ {
+ struct ncp_request_reply *req;
+
+ server->rcv.creq = NULL;
+ if (list_empty(&server->tx.requests)) {
+ return;
+ }
+ req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
+ list_del_init(&req->req);
+ __ncp_start_request(server, req);
+ }
+
+ static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
+ {
+ if (server->info_sock) {
+ struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
+ __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)};
+ struct kvec iov[2] = {
+ {.iov_base = hdr, .iov_len = 8},
+ {.iov_base = (void *)data, .iov_len = len},
+ };
+
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE,
+ iov, 2, len + 8);
+
+ sock_sendmsg(server->info_sock, &msg);
+ }
+ }
+
+ void ncpdgram_rcv_proc(struct work_struct *work)
+ {
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, rcv.tq);
+ struct socket* sock;
+
+ sock = server->ncp_sock;
+
+ while (1) {
+ struct ncp_reply_header reply;
+ int result;
+
+ result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
+ if (result < 0) {
+ break;
+ }
+ if (result >= sizeof(reply)) {
+ struct ncp_request_reply *req;
+
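+ /* Watchdog probe: the server sends a packet ending in '?';
+ * answer by echoing it back with that byte replaced by 'Y'. */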
+ if (reply.type == NCP_WATCHDOG) {
+ unsigned char buf[10];
+
+ if (server->connection != get_conn_number(&reply)) {
+ goto drop;
+ }
+ result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
+ if (result < 0) {
+ ncp_dbg(1, "recv failed with %d\n", result);
+ continue;
+ }
+ if (result < 10) {
+ ncp_dbg(1, "too short (%u) watchdog packet\n", result);
+ continue;
+ }
+ if (buf[9] != '?') {
+ ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
+ continue;
+ }
+ buf[9] = 'Y';
+ _send(sock, buf, sizeof(buf));
+ continue;
+ }
+ if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
+ result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
+ if (result < 0) {
+ continue;
+ }
+ info_server(server, 0, server->unexpected_packet.data, result);
+ continue;
+ }
+ mutex_lock(&server->rcv.creq_mutex);
+ req = server->rcv.creq;
+ if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
+ server->connection == get_conn_number(&reply)))) {
+ if (reply.type == NCP_POSITIVE_ACK) {
+ server->timeout_retries = server->m.retry_count;
+ server->timeout_last = NCP_MAX_RPC_TIMEOUT;
+ mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
+ } else if (reply.type == NCP_REPLY) {
+ result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
+ #ifdef CONFIG_NCPFS_PACKET_SIGNING
+ if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
+ if (result < 8 + 8) {
+ result = -EIO;
+ } else {
+ unsigned int hdrl;
+
+ result -= 8;
+ hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
+ if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
+ pr_info("Signature violation\n");
+ result = -EIO;
+ }
+ }
+ }
+ #endif
+ del_timer(&server->timeout_tm);
+ server->rcv.creq = NULL;
+ ncp_finish_request(server, req, result);
+ __ncp_next_request(server);
+ mutex_unlock(&server->rcv.creq_mutex);
+ continue;
+ }
+ }
+ mutex_unlock(&server->rcv.creq_mutex);
+ }
+ drop:;
+ _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
+ }
+ }
+
+ static void __ncpdgram_timeout_proc(struct ncp_server *server)
+ {
+ /* If timer is pending, we are processing another request... */
+ if (!timer_pending(&server->timeout_tm)) {
+ struct ncp_request_reply* req;
+
+ req = server->rcv.creq;
+ if (req) {
+ int timeout;
+
+ if (server->m.flags & NCP_MOUNT_SOFT) {
+ if (server->timeout_retries-- == 0) {
+ __ncp_abort_request(server, req, -ETIMEDOUT);
+ return;
+ }
+ }
+ /* Ignore errors */
+ ncpdgram_send(server->ncp_sock, req);
+ timeout = server->timeout_last << 1;
+ if (timeout > NCP_MAX_RPC_TIMEOUT) {
+ timeout = NCP_MAX_RPC_TIMEOUT;
+ }
+ server->timeout_last = timeout;
+ mod_timer(&server->timeout_tm, jiffies + timeout);
+ }
+ }
+ }
+
+ void ncpdgram_timeout_proc(struct work_struct *work)
+ {
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, timeout_tq);
+ mutex_lock(&server->rcv.creq_mutex);
+ __ncpdgram_timeout_proc(server);
+ mutex_unlock(&server->rcv.creq_mutex);
+ }
+
+ static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
+ {
+ int result;
+
+ if (buffer) {
+ result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
+ } else {
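+ /* no destination buffer: sink the data into a scratch buffer */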
+ static unsigned char dummy[1024];
+
+ if (len > sizeof(dummy)) {
+ len = sizeof(dummy);
+ }
+ result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
+ }
+ if (result < 0) {
+ return result;
+ }
+ if (result > len) {
+ pr_err("tcp: bug in recvmsg (%u > %zu)\n", result, len);
+ return -EIO;
+ }
+ return result;
+ }
+
+ static int __ncptcp_rcv_proc(struct ncp_server *server)
+ {
+ /* We have to check the result, so store the complete header */
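+ /* rcv.state: 0 = parse the 10-byte frame header, 1 = reply payload,
+ * 2 = skip payload, 3 = skip payload and fail the request,
+ * 4 = rest of a signed header, 5 = unexpected packet body */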
+ while (1) {
+ int result;
+ struct ncp_request_reply *req;
+ int datalen;
+ int type;
+
+ while (server->rcv.len) {
+ result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
+ if (result == -EAGAIN) {
+ return 0;
+ }
+ if (result <= 0) {
+ req = server->rcv.creq;
+ if (req) {
+ __ncp_abort_request(server, req, -EIO);
+ } else {
+ __ncptcp_abort(server);
+ }
+ if (result < 0) {
+ pr_err("tcp: error in recvmsg: %d\n", result);
+ } else {
+ ncp_dbg(1, "tcp: EOF\n");
+ }
+ return -EIO;
+ }
+ if (server->rcv.ptr) {
+ server->rcv.ptr += result;
+ }
+ server->rcv.len -= result;
+ }
+ switch (server->rcv.state) {
+ case 0:
+ if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
+ pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
+ __ncptcp_abort(server);
+ return -EIO;
+ }
+ datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
+ if (datalen < 10) {
+ pr_err("tcp: Unexpected reply len %d\n", datalen);
+ __ncptcp_abort(server);
+ return -EIO;
+ }
+ #ifdef CONFIG_NCPFS_PACKET_SIGNING
+ if (server->sign_active) {
+ if (datalen < 18) {
+ pr_err("tcp: Unexpected reply len %d\n", datalen);
+ __ncptcp_abort(server);
+ return -EIO;
+ }
+ server->rcv.buf.len = datalen - 8;
+ server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
+ server->rcv.len = 8;
+ server->rcv.state = 4;
+ break;
+ }
+ #endif
+ type = ntohs(server->rcv.buf.type);
+ #ifdef CONFIG_NCPFS_PACKET_SIGNING
+ cont:;
+ #endif
+ if (type != NCP_REPLY) {
+ if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
+ *(__u16*)(server->unexpected_packet.data) = htons(type);
+ server->unexpected_packet.len = datalen - 8;
+
+ server->rcv.state = 5;
+ server->rcv.ptr = server->unexpected_packet.data + 2;
+ server->rcv.len = datalen - 10;
+ break;
+ }
+ ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
+ skipdata2:;
+ server->rcv.state = 2;
+ skipdata:;
+ server->rcv.ptr = NULL;
+ server->rcv.len = datalen - 10;
+ break;
+ }
+ req = server->rcv.creq;
+ if (!req) {
+ ncp_dbg(1, "Reply without appropriate request\n");
+ goto skipdata2;
+ }
+ if (datalen > req->datalen + 8) {
+ pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
+ server->rcv.state = 3;
+ goto skipdata;
+ }
+ req->datalen = datalen - 8;
+ ((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
+ server->rcv.ptr = server->rxbuf + 2;
+ server->rcv.len = datalen - 10;
+ server->rcv.state = 1;
+ break;
+ #ifdef CONFIG_NCPFS_PACKET_SIGNING
+ case 4:
+ datalen = server->rcv.buf.len;
+ type = ntohs(server->rcv.buf.type2);
+ goto cont;
+ #endif
+ case 1:
+ req = server->rcv.creq;
+ if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
+ if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
+ pr_err("tcp: Bad sequence number\n");
+ __ncp_abort_request(server, req, -EIO);
+ return -EIO;
+ }
+ if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
+ pr_err("tcp: Connection number mismatch\n");
+ __ncp_abort_request(server, req, -EIO);
+ return -EIO;
+ }
+ }
+ #ifdef CONFIG_NCPFS_PACKET_SIGNING
+ if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
+ if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
+ pr_err("tcp: Signature violation\n");
+ __ncp_abort_request(server, req, -EIO);
+ return -EIO;
+ }
+ }
+ #endif
+ ncp_finish_request(server, req, req->datalen);
+ nextreq:;
+ __ncp_next_request(server);
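+ /* fall through: reset the receiver for the next frame header */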
+ case 2:
+ next:;
+ server->rcv.ptr = (unsigned char*)&server->rcv.buf;
+ server->rcv.len = 10;
+ server->rcv.state = 0;
+ break;
+ case 3:
+ ncp_finish_request(server, server->rcv.creq, -EIO);
+ goto nextreq;
+ case 5:
+ info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
+ goto next;
+ }
+ }
+ }
+
+ void ncp_tcp_rcv_proc(struct work_struct *work)
+ {
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, rcv.tq);
+
+ mutex_lock(&server->rcv.creq_mutex);
+ __ncptcp_rcv_proc(server);
+ mutex_unlock(&server->rcv.creq_mutex);
+ }
+
+ void ncp_tcp_tx_proc(struct work_struct *work)
+ {
+ struct ncp_server *server =
+ container_of(work, struct ncp_server, tx.tq);
+
+ mutex_lock(&server->rcv.creq_mutex);
+ __ncptcp_try_send(server);
+ mutex_unlock(&server->rcv.creq_mutex);
+ }
+
+ static int do_ncp_rpc_call(struct ncp_server *server, int size,
+ unsigned char* reply_buf, int max_reply_size)
+ {
+ int result;
+ struct ncp_request_reply *req;
+
+ req = ncp_alloc_req();
+ if (!req)
+ return -ENOMEM;
+
+ req->reply_buf = reply_buf;
+ req->datalen = max_reply_size;
+ req->tx_iov[1].iov_base = server->packet;
+ req->tx_iov[1].iov_len = size;
+ req->tx_type = *(u_int16_t*)server->packet;
+
+ result = ncp_add_request(server, req);
+ if (result < 0)
+ goto out;
+
+ if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
+ ncp_abort_request(server, req, -EINTR);
+ result = -EINTR;
+ goto out;
+ }
+
+ result = req->result;
+
+ out:
+ ncp_req_put(req);
+
+ return result;
+ }
+
+ /*
+ * We need the server to be locked here, so check!
+ */
+
+ static int ncp_do_request(struct ncp_server *server, int size,
+ void* reply, int max_reply_size)
+ {
+ int result;
+
+ if (server->lock == 0) {
+ pr_err("Server not locked!\n");
+ return -EIO;
+ }
+ if (!ncp_conn_valid(server)) {
+ return -EIO;
+ }
+ {
+ sigset_t old_set;
+ unsigned long mask, flags;
+
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ old_set = current->blocked;
+ if (current->flags & PF_EXITING)
+ mask = 0;
+ else
+ mask = sigmask(SIGKILL);
+ if (server->m.flags & NCP_MOUNT_INTR) {
+ /* FIXME: This doesn't seem right at all. So, like,
+ we can't handle SIGINT and get whatever to stop?
+ What if we've blocked it ourselves? What about
+ alarms? Why, in fact, are we mucking with the
+ sigmask at all? -- r~ */
+ if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
+ mask |= sigmask(SIGINT);
+ if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
+ mask |= sigmask(SIGQUIT);
+ }
+ siginitsetinv(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+ result = do_ncp_rpc_call(server, size, reply, max_reply_size);
+
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ current->blocked = old_set;
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ }
+
+ ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);
+
+ return result;
+ }
+
+ /* ncp_do_request ensures that at least a complete reply header is
+ * received. It assumes that server->current_size contains the ncp
+ * request size.
+ */
+ int ncp_request2(struct ncp_server *server, int function,
+ void* rpl, int size)
+ {
+ struct ncp_request_header *h;
+ struct ncp_reply_header* reply = rpl;
+ int result;
+
+ h = (struct ncp_request_header *) (server->packet);
+ if (server->has_subfunction != 0) {
+ *(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
+ }
+ h->type = NCP_REQUEST;
+ /*
+ * The server shouldn't know or care what task is making a
+ * request, so we always use the same task number.
+ */
+ h->task = 2; /* (current->pid) & 0xff; */
+ h->function = function;
+
+ result = ncp_do_request(server, server->current_size, reply, size);
+ if (result < 0) {
+ ncp_dbg(1, "ncp_request_error: %d\n", result);
+ goto out;
+ }
+ server->completion = reply->completion_code;
+ server->conn_status = reply->connection_state;
+ server->reply_size = result;
+ server->ncp_reply_size = result - sizeof(struct ncp_reply_header);
+
+ result = reply->completion_code;
+
+ if (result != 0)
+ ncp_vdbg("completion code=%x\n", result);
+ out:
+ return result;
+ }
+
+ int ncp_connect(struct ncp_server *server)
+ {
+ struct ncp_request_header *h;
+ int result;
+
+ server->connection = 0xFFFF;
+ server->sequence = 255;
+
+ h = (struct ncp_request_header *) (server->packet);
+ h->type = NCP_ALLOC_SLOT_REQUEST;
+ h->task = 2; /* see above */
+ h->function = 0;
+
+ result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
+ if (result < 0)
+ goto out;
+ server->connection = h->conn_low + (h->conn_high * 256);
+ result = 0;
+ out:
+ return result;
+ }
+
+ int ncp_disconnect(struct ncp_server *server)
+ {
+ struct ncp_request_header *h;
+
+ h = (struct ncp_request_header *) (server->packet);
+ h->type = NCP_DEALLOC_SLOT_REQUEST;
+ h->task = 2; /* see above */
+ h->function = 0;
+
+ return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
+ }
+
+ void ncp_lock_server(struct ncp_server *server)
+ {
+ mutex_lock(&server->mutex);
+ if (server->lock)
+ pr_warn("%s: was locked!\n", __func__);
+ server->lock = 1;
+ }
+
+ void ncp_unlock_server(struct ncp_server *server)
+ {
+ if (!server->lock) {
+ pr_warn("%s: was not locked!\n", __func__);
+ return;
+ }
+ server->lock = 0;
+ mutex_unlock(&server->mutex);
+ }