* in detail.
*/
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
{
struct net *net = seq_file_net(seq);
loff_t off;
}
void dev_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
{
read_unlock(&dev_base_lock);
}
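The lines added by this patch are sparse annotations, not code the compiler acts on: __acquires() marks a function that returns with the named lock held, __releases() one that is entered with it held and drops it. They only matter when the tree is checked with sparse (make C=1 for files being rebuilt, make C=2 for every file); in an ordinary build they expand to nothing. The definitions in include/linux/compiler.h are along these lines:

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x,0,1)))	/* 0 references on entry, 1 on exit */
# define __releases(x)	__attribute__((context(x,1,0)))	/* 1 reference on entry, 0 on exit */
#else
# define __acquires(x)
# define __releases(x)
#endif

Without them, sparse reports a context imbalance for every *_seq_start()/*_seq_stop() pair below, because the lock taken in the start handler is only dropped again in the stop handler.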
#ifdef CONFIG_PROC_FS
static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
{
struct net *net = seq_file_net(seq);
struct net_device *dev;
}
static void dev_mc_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
{
read_unlock(&dev_base_lock);
}
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
int xstats_type, spinlock_t *lock, struct gnet_dump *d)
+ __acquires(lock)
{
memset(d, 0, sizeof(*d));
}
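The expression inside the annotation need not be a global lock: gnet_stats_start_copy_compat() names its spinlock_t *lock argument, and the neighbour helpers that follow name tbl->lock. A hypothetical helper (not from this patch) annotated the same way:

/* Takes the caller-supplied lock and returns with it held; the matching
 * __releases(lock) belongs on whichever function eventually drops it. */
static void stats_lock_and_clear(spinlock_t *lock, struct gnet_dump *d)
	__acquires(lock)
{
	spin_lock_bh(lock);
	memset(d, 0, sizeof(*d));
}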
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+ __acquires(tbl->lock)
{
struct neigh_seq_state *state = seq->private;
loff_t pos_minus_one;
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
+ __releases(tbl->lock)
{
struct neigh_seq_state *state = seq->private;
struct neigh_table *tbl = state->tbl;
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(proto_list_lock)
{
read_lock(&proto_list_lock);
return seq_list_start_head(&proto_list, *pos);
}
static void proto_seq_stop(struct seq_file *seq, void *v)
+ __releases(proto_list_lock)
{
read_unlock(&proto_list_lock);
}
}
static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(fib_hash_lock)
{
void *v = NULL;
}
static void fib_seq_stop(struct seq_file *seq, void *v)
+ __releases(fib_hash_lock)
{
read_unlock(&fib_hash_lock);
}
* exclusive lock release). It should be ifdefed really.
*/
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
+ __acquires(hashinfo->lhash_lock)
{
write_lock(&hashinfo->lhash_lock);
}
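inet_listen_wlock() gets only the __acquires() half: it returns with lhash_lock write-held, and the matching write_unlock() lives in inet_listen_unlock(), which these hunks do not touch, so there is nothing to mark __releases() here.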
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(udp_hash_lock)
{
read_lock(&udp_hash_lock);
return *pos ? udp_get_idx(seq, *pos-1) : (void *)1;
}
static void udp_seq_stop(struct seq_file *seq, void *v)
+ __releases(udp_hash_lock)
{
read_unlock(&udp_hash_lock);
}
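udp_seq_start() open-codes the header-row cookie as (void *)1; the ip6_flowlabel, mcast and netlink iterators further down use the named constant, which include/linux/seq_file.h defines as the same value:

#define SEQ_START_TOKEN ((void *)1)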
}
static void *ac6_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
{
read_lock(&dev_base_lock);
return ac6_get_idx(seq, *pos);
}
static void ac6_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
{
struct ac6_iter_state *state = ac6_seq_private(seq);
if (likely(state->idev != NULL)) {
}
static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(ip6_fl_lock)
{
read_lock_bh(&ip6_fl_lock);
return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void ip6fl_seq_stop(struct seq_file *seq, void *v)
+ __releases(ip6_fl_lock)
{
read_unlock_bh(&ip6_fl_lock);
}
}
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
{
read_lock(&dev_base_lock);
return igmp6_mc_get_idx(seq, *pos);
}
static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
{
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
if (likely(state->idev != NULL)) {
}
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
{
read_lock(&dev_base_lock);
return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
{
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
if (likely(state->im != NULL)) {
*/
static void netlink_table_grab(void)
+ __acquires(nl_table_lock)
{
write_lock_irq(&nl_table_lock);
}
static inline void netlink_table_ungrab(void)
+ __releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
wake_up(&nl_table_wait);
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(nl_table_lock)
{
read_lock(&nl_table_lock);
return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
+ __releases(nl_table_lock)
{
read_unlock(&nl_table_lock);
}
*/
void qdisc_lock_tree(struct net_device *dev)
+ __acquires(dev->queue_lock)
+ __acquires(dev->ingress_lock)
{
spin_lock_bh(&dev->queue_lock);
spin_lock(&dev->ingress_lock);
}
void qdisc_unlock_tree(struct net_device *dev)
+ __releases(dev->ingress_lock)
+ __releases(dev->queue_lock)
{
spin_unlock(&dev->ingress_lock);
spin_unlock_bh(&dev->queue_lock);
};
static void *c_start(struct seq_file *m, loff_t *pos)
+ __acquires(cd->hash_lock)
{
loff_t n = *pos;
unsigned hash, entry;
}
static void c_stop(struct seq_file *m, void *p)
+ __releases(cd->hash_lock)
{
struct cache_detail *cd = ((struct handle*)m->private)->cd;
read_unlock(&cd->hash_lock);
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(unix_table_lock)
{
struct unix_iter_state *iter = seq->private;
spin_lock(&unix_table_lock);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
+ __releases(unix_table_lock)
{
spin_unlock(&unix_table_lock);
}
}
static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo)
+ __releases(xfrm_state_afinfo_lock)
{
write_unlock_bh(&xfrm_state_afinfo_lock);
}
}
static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+ __releases(xfrm_state_afinfo_lock)
{
read_unlock(&xfrm_state_afinfo_lock);
}
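Where a function deals with more than one lock, the annotations simply stack, one per lock, as in the qdisc_lock_tree()/qdisc_unlock_tree() hunks above. Taken together, the pattern being annotated throughout is the one in this minimal, self-contained sketch (hypothetical names, not taken from the patch):

#include <linux/seq_file.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);		/* hypothetical lock */

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(example_lock)		/* returns with example_lock read-held */
{
	read_lock(&example_lock);
	return *pos ? NULL : SEQ_START_TOKEN;	/* real code would walk to *pos here */
}

static void example_seq_stop(struct seq_file *seq, void *v)
	__releases(example_lock)		/* entered with example_lock read-held */
{
	read_unlock(&example_lock);
}

Without the two annotations a sparse run warns about a context imbalance in both functions; with them in place the start/stop pair balances and the warnings disappear.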