cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
cpumask_t cpu_possible_map;
-#endif
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_callout_map);
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
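+/*
+ * With cpu_possible_map now starting out empty, the architecture setup
+ * code must set a bit for every CPU that could ever be brought up before
+ * anything iterates the map. A minimal sketch, assuming a hypothetical
+ * platform predicate plat_cpu_possible():
+ *
+ *	for (cpu = 0; cpu < NR_CPUS; cpu++)
+ *		if (plat_cpu_possible(cpu))
+ *			cpu_set(cpu, cpu_possible_map);
+ */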
/* The per processor IRQ masks (these are usually kept in sync) */
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+
+archmrproper:
$(Q)rm -rf arch/$(ARCH)/include
archprepare: checkbin
return ret;
}
-int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact);
-
asmlinkage long
sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
struct sigaction32 __user *oact, size_t sigsetsize)
p->size = size;
p->next = NULL;
p->active = 0;
+ p->commit = 0;
+ p->read = 0;
p->char_buf_ptr = (char *)(p->data);
p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
/* printk("Flip create %p\n", p); */
*tbh = t->next;
t->next = NULL;
t->used = 0;
+ t->commit = 0;
+ t->read = 0;
/* DEBUG ONLY */
memset(t->data, '*', size);
/* printk("Flip recycle %p\n", t); */
if (b != NULL) {
b->next = n;
b->active = 0;
+ b->commit = b->used;
} else
tty->buf.head = n;
tty->buf.tail = n;
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf;
+ int count;
+ char *char_buf;
+ unsigned char *flag_buf;
disc = tty_ldisc_ref(tty);
if (disc == NULL) /* !TTY_LDISC */
goto out;
}
spin_lock_irqsave(&tty->buf.lock, flags);
- while((tbuf = tty->buf.head) != NULL && !tbuf->active) {
+	while ((tbuf = tty->buf.head) != NULL) {
+ while ((count = tbuf->commit - tbuf->read) != 0) {
+ char_buf = tbuf->char_buf_ptr + tbuf->read;
+ flag_buf = tbuf->flag_buf_ptr + tbuf->read;
+ tbuf->read += count;
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+ disc->receive_buf(tty, char_buf, flag_buf, count);
+ spin_lock_irqsave(&tty->buf.lock, flags);
+ }
+ if (tbuf->active)
+ break;
tty->buf.head = tbuf->next;
if (tty->buf.head == NULL)
tty->buf.tail = NULL;
- spin_unlock_irqrestore(&tty->buf.lock, flags);
- /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */
- disc->receive_buf(tty, tbuf->char_buf_ptr,
- tbuf->flag_buf_ptr,
- tbuf->used);
- spin_lock_irqsave(&tty->buf.lock, flags);
tty_buffer_free(tty, tbuf);
}
spin_unlock_irqrestore(&tty->buf.lock, flags);
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
- if (tty->buf.tail != NULL)
+ if (tty->buf.tail != NULL) {
tty->buf.tail->active = 0;
+ tty->buf.tail->commit = tty->buf.tail->used;
+ }
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
To use this driver, you will need a firmware image for it.
You can obtain the firmware from
<http://ipw2100.sf.net/>. Once you have the firmware image, you
- will need to place it in /etc/firmware.
+ will need to place it in /lib/firmware.
You will also very likely need the Wireless Tools in order to
configure your card:
* bitmap of size NR_CPUS.
*
* #ifdef CONFIG_HOTPLUG_CPU
- * cpu_possible_map - all NR_CPUS bits set
+ * cpu_possible_map - has bit 'cpu' set iff cpu is populatable
* cpu_present_map - has bit 'cpu' set iff cpu is populated
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* #else
{
unsigned long flags;
spin_lock_irqsave(&t->buf.lock, flags);
- if (t->buf.tail != NULL)
+ if (t->buf.tail != NULL) {
t->buf.tail->active = 0;
+ t->buf.tail->commit = t->buf.tail->used;
+ }
spin_unlock_irqrestore(&t->buf.lock, flags);
schedule_work(&t->buf.work);
}
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
+#include <linux/ioport.h>
#include <asm/kexec.h>
/* Verify architecture specific macros are defined */
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
-int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo);
+int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+ long timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
int used;
int size;
int active;
+ int commit;
+ int read;
/* Data points here */
unsigned long data[0];
};
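+/*
+ * Intended invariant for the offsets above: 0 <= read <= commit <= used
+ * <= size.  The driver side advances 'used' as characters arrive and
+ * 'commit' once they may be handed to the line discipline; the flush
+ * side advances 'read' as it consumes committed data, which is what lets
+ * receive_buf() run with buf.lock dropped.  A minimal sketch (not part
+ * of the patch) of how a consumer computes the committed-but-unread
+ * count under buf.lock:
+ */
+static inline int tty_buffer_pending(const struct tty_buffer *b)
+{
+	return b->commit - b->read;
+}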
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
- if (tty->buf.tail != NULL)
+ if (tty->buf.tail != NULL) {
tty->buf.tail->active = 0;
+ tty->buf.tail->commit = tty->buf.tail->used;
+ }
spin_unlock_irqrestore(&tty->buf.lock, flags);
schedule_delayed_work(&tty->buf.work, 1);
}
/* May be different when we get VFIR */
#define LAP_MAX_HEADER (LAP_ADDR_HEADER + LAP_CTRL_HEADER)
+/* Each IrDA device gets a random 32-bit IRLAP device address */
+#define LAP_ALEN 4
+
#define BROADCAST 0xffffffff /* Broadcast device address */
#define CBROADCAST 0xfe /* Connection broadcast address */
#define XID_FORMAT 0x01 /* Discovery XID format */
extern char __initramfs_start[], __initramfs_end[];
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
+#include <linux/kexec.h>
static void __init free_initrd(void)
{
- free_initrd_mem(initrd_start, initrd_end);
+#ifdef CONFIG_KEXEC
+ unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
+ unsigned long crashk_end = (unsigned long)__va(crashk_res.end);
+
+ /*
+	 * If the initrd region overlaps the crashkernel reserved region,
+	 * free only the memory that is not part of the crashkernel region.
+ */
+ if (initrd_start < crashk_end && initrd_end > crashk_start) {
+ /*
+		 * Initialize the initrd memory region, since a kexec boot
+		 * does not do so.
+ */
+ memset((void *)initrd_start, 0, initrd_end - initrd_start);
+ if (initrd_start < crashk_start)
+ free_initrd_mem(initrd_start, crashk_start);
+ if (initrd_end > crashk_end)
+ free_initrd_mem(crashk_end, initrd_end);
+ } else
+#endif
+ free_initrd_mem(initrd_start, initrd_end);
+
initrd_start = 0;
initrd_end = 0;
}
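+/*
+ * Example with hypothetical addresses: an initrd at [8MB, 24MB) and a
+ * crash kernel reservation at [16MB, 80MB) overlap, so only the
+ * non-reserved head [8MB, 16MB) is freed and the initrd's tail stays
+ * resident inside the crashkernel region.
+ */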
*/
child_reaper = current;
- /* Sets up cpus_possible() */
smp_prepare_cpus(max_cpus);
do_pre_smp_initcalls();
goto out;
}
- ret = netlink_attachskb(sock, nc, 0, MAX_SCHEDULE_TIMEOUT);
+ ret = netlink_attachskb(sock, nc, 0,
+ MAX_SCHEDULE_TIMEOUT, NULL);
if (ret == 1)
goto retry;
if (ret) {
* could possibly have landed at. Also cast things to loff_t to
* prevent overflows and make comparisons vs. equal-width types.
*/
+ size = PAGE_ALIGN(size);
while (vma && (loff_t)(vma->vm_end - addr) <= size) {
next = vma->vm_next;
#endif
local_irq_enable();
for (i = 0;;) {
+ touch_softlockup_watchdog();
i += panic_blink(i);
mdelay(1);
i++;
*/
unsigned long nr_running;
#ifdef CONFIG_SMP
- unsigned long prio_bias;
unsigned long cpu_load[3];
#endif
unsigned long long nr_switches;
return prio;
}
-#ifdef CONFIG_SMP
-static inline void inc_prio_bias(runqueue_t *rq, int prio)
-{
- rq->prio_bias += MAX_PRIO - prio;
-}
-
-static inline void dec_prio_bias(runqueue_t *rq, int prio)
-{
- rq->prio_bias -= MAX_PRIO - prio;
-}
-
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
- rq->nr_running++;
- if (rt_task(p)) {
- if (p != rq->migration_thread)
- /*
- * The migration thread does the actual balancing. Do
- * not bias by its priority as the ultra high priority
- * will skew balancing adversely.
- */
- inc_prio_bias(rq, p->prio);
- } else
- inc_prio_bias(rq, p->static_prio);
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
- rq->nr_running--;
- if (rt_task(p)) {
- if (p != rq->migration_thread)
- dec_prio_bias(rq, p->prio);
- } else
- dec_prio_bias(rq, p->static_prio);
-}
-#else
-static inline void inc_prio_bias(runqueue_t *rq, int prio)
-{
-}
-
-static inline void dec_prio_bias(runqueue_t *rq, int prio)
-{
-}
-
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
-{
- rq->nr_running++;
-}
-
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
-{
- rq->nr_running--;
-}
-#endif
-
/*
* __activate_task - move a task to the runqueue.
*/
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
enqueue_task(p, rq->active);
- inc_nr_running(p, rq);
+ rq->nr_running++;
}
/*
static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
- inc_nr_running(p, rq);
+ rq->nr_running++;
}
static int recalc_task_prio(task_t *p, unsigned long long now)
*/
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
- dec_nr_running(p, rq);
+ rq->nr_running--;
dequeue_task(p, p->array);
p->array = NULL;
}
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
-static unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static inline unsigned long source_load(int cpu, int type)
{
runqueue_t *rq = cpu_rq(cpu);
- unsigned long running = rq->nr_running;
- unsigned long source_load, cpu_load = rq->cpu_load[type-1],
- load_now = running * SCHED_LOAD_SCALE;
-
+ unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
if (type == 0)
- source_load = load_now;
- else
- source_load = min(cpu_load, load_now);
-
- if (running > 1 || (idle == NOT_IDLE && running))
- /*
- * If we are busy rebalancing the load is biased by
- * priority to create 'nice' support across cpus. When
- * idle rebalancing we should only bias the source_load if
- * there is more than one task running on that queue to
- * prevent idle rebalance from trying to pull tasks from a
- * queue with only one running task.
- */
- source_load = source_load * rq->prio_bias / running;
+ return load_now;
- return source_load;
-}
-
-static inline unsigned long source_load(int cpu, int type)
-{
- return __source_load(cpu, type, NOT_IDLE);
+ return min(rq->cpu_load[type-1], load_now);
}
/*
* Return a high guess at the load of a migration-target cpu
*/
-static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
+static inline unsigned long target_load(int cpu, int type)
{
runqueue_t *rq = cpu_rq(cpu);
- unsigned long running = rq->nr_running;
- unsigned long target_load, cpu_load = rq->cpu_load[type-1],
- load_now = running * SCHED_LOAD_SCALE;
-
+ unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
if (type == 0)
- target_load = load_now;
- else
- target_load = max(cpu_load, load_now);
+ return load_now;
- if (running > 1 || (idle == NOT_IDLE && running))
- target_load = target_load * rq->prio_bias / running;
-
- return target_load;
-}
-
-static inline unsigned long target_load(int cpu, int type)
-{
- return __target_load(cpu, type, NOT_IDLE);
+ return max(rq->cpu_load[type-1], load_now);
}
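+/*
+ * Worked example: with 3 runnable tasks (load_now = 3 * SCHED_LOAD_SCALE)
+ * and a decayed cpu_load[type-1] of 2 * SCHED_LOAD_SCALE, source_load()
+ * reports the smaller value and target_load() the larger one, so the
+ * balancer under-estimates sources and over-estimates targets instead of
+ * reacting to transient load spikes.
+ */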
/*
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
- inc_nr_running(p, rq);
+ rq->nr_running++;
}
set_need_resched();
} else
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
- dec_nr_running(p, src_rq);
+ src_rq->nr_running--;
set_task_cpu(p, this_cpu);
- inc_nr_running(p, this_rq);
+ this_rq->nr_running++;
enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
/* Bias balancing toward cpus of our domain */
if (local_group)
- load = __target_load(i, load_idx, idle);
+ load = target_load(i, load_idx);
else
- load = __source_load(i, load_idx, idle);
+ load = source_load(i, load_idx);
avg_load += load;
}
int i;
for_each_cpu_mask(i, group->cpumask) {
- load = __source_load(i, 0, idle);
+ load = source_load(i, 0);
if (load > max_load) {
max_load = load;
goto out_unlock;
}
array = p->array;
- if (array) {
+ if (array)
dequeue_task(p, array);
- dec_prio_bias(rq, p->static_prio);
- }
old_prio = p->prio;
new_prio = NICE_TO_PRIO(nice);
if (array) {
enqueue_task(p, array);
- inc_prio_bias(rq, p->static_prio);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
BUG();
}
+ /*
+ * Prevent CPUs from coming and going.
+ * lock_cpu_hotplug() nests outside cache_chain_mutex
+ */
+ lock_cpu_hotplug();
+
mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) {
cachep->dtor = dtor;
cachep->name = name;
- /* Don't let CPUs to come and go */
- lock_cpu_hotplug();
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
- unlock_cpu_hotplug();
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
mutex_unlock(&cache_chain_mutex);
+ unlock_cpu_hotplug();
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
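+/*
+ * Note the ordering established above: lock_cpu_hotplug() is taken
+ * before cache_chain_mutex and released after it.  Any other path that
+ * needs both (e.g. the CPU-hotplug notifier) must follow the same order
+ * to avoid deadlocking against kmem_cache_create().
+ */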
struct address_space *mapping = page_mapping(page);
if (page_mapped(page) && mapping)
- if (try_to_unmap(page, 0) != SWAP_SUCCESS)
+ if (try_to_unmap(page, 1) != SWAP_SUCCESS)
goto unlock_retry;
if (PageDirty(page)) {
* pages are swapped out.
*
* The function returns after 10 attempts or if no pages
- * are movable anymore because t has become empty
+ * are movable anymore because "to" has become empty
* or no retryable pages exist anymore.
*
* Return: Number of pages not migrated when "to" ran empty.
goto unlock_both;
if (mapping->a_ops->migratepage) {
+ /*
+ * Most pages have a mapping and most filesystems
+ * should provide a migration function. Anonymous
+ * pages are part of swap space which also has its
+ * own migration function. This is the most common
+ * path for page migration.
+ */
rc = mapping->a_ops->migratepage(newpage, page);
goto unlock_both;
}
/*
- * Trigger writeout if page is dirty
+ * Default handling if a filesystem does not provide
+ * a migration function. We can only migrate clean
+ * pages so try to write out any dirty pages first.
*/
if (PageDirty(page)) {
switch (pageout(page, mapping)) {
; /* try to migrate the page below */
}
}
+
/*
- * If we have no buffer or can release the buffer
- * then do a simple migration.
+	 * Buffers are managed in a filesystem-specific way.
+ * We must have no buffers or drop them.
*/
if (!page_has_buffers(page) ||
try_to_release_page(page, GFP_KERNEL)) {
* swap them out.
*/
if (pass > 4) {
+ /*
+ * Persistently unable to drop buffers..... As a
+ * measure of last resort we fall back to
+ * swap_page().
+ */
unlock_page(newpage);
newpage = NULL;
rc = swap_page(page);
*/
static void port_carrier_check(void *arg)
{
- struct net_bridge_port *p = arg;
+ struct net_device *dev = arg;
+ struct net_bridge_port *p;
rtnl_lock();
+ p = dev->br_port;
+ if (!p)
+ goto done;
+
if (netif_carrier_ok(p->dev)) {
u32 cost = port_cost(p->dev);
br_stp_disable_port(p);
spin_unlock_bh(&p->br->lock);
}
+done:
rtnl_unlock();
}
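+/*
+ * The work item now carries the net_device instead of the port: the port
+ * can be torn down while the check is still queued, so the handler
+ * re-looks-up dev->br_port under RTNL and simply bails out if the port
+ * is gone.
+ */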
+static void release_nbp(struct kobject *kobj)
+{
+ struct net_bridge_port *p
+ = container_of(kobj, struct net_bridge_port, kobj);
+ kfree(p);
+}
+
+static struct kobj_type brport_ktype = {
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &brport_sysfs_ops,
+#endif
+ .release = release_nbp,
+};
+
static void destroy_nbp(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
- dev->br_port = NULL;
p->br = NULL;
p->dev = NULL;
dev_put(dev);
- br_sysfs_freeif(p);
+ kobject_put(&p->kobj);
}
static void destroy_nbp_rcu(struct rcu_head *head)
struct net_bridge *br = p->br;
struct net_device *dev = p->dev;
- /* Race between RTNL notify and RCU callback */
- if (p->deleted)
- return;
+ sysfs_remove_link(&br->ifobj, dev->name);
dev_set_promiscuity(dev, -1);
cancel_delayed_work(&p->carrier_check);
- flush_scheduled_work();
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
- p->deleted = 1;
spin_unlock_bh(&br->lock);
br_fdb_delete_by_port(br, p);
list_del_rcu(&p->list);
+ rcu_assign_pointer(dev->br_port, NULL);
+
+ kobject_del(&p->kobj);
+
call_rcu(&p->rcu, destroy_nbp_rcu);
}
struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) {
- br_sysfs_removeif(p);
del_nbp(p);
}
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
- dev->br_port = p;
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
- INIT_WORK(&p->carrier_check, port_carrier_check, p);
+ INIT_WORK(&p->carrier_check, port_carrier_check, dev);
kobject_init(&p->kobj);
+ kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
+ p->kobj.ktype = &brport_ktype;
+ p->kobj.parent = &(dev->class_dev.kobj);
+ p->kobj.kset = NULL;
+
return p;
}
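+/*
+ * Port lifetime is now tied to the kobject initialized above: the final
+ * kobject_put() in destroy_nbp() invokes release_nbp(), which does the
+ * kfree(), so a struct net_bridge_port is never freed directly.
+ */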
if (dev->br_port != NULL)
return -EBUSY;
- if (IS_ERR(p = new_nbp(br, dev)))
+ p = new_nbp(br, dev);
+ if (IS_ERR(p))
return PTR_ERR(p);
- if ((err = br_fdb_insert(br, p, dev->dev_addr)))
- destroy_nbp(p);
-
- else if ((err = br_sysfs_addif(p)))
- del_nbp(p);
- else {
- dev_set_promiscuity(dev, 1);
+ err = kobject_add(&p->kobj);
+ if (err)
+ goto err0;
- list_add_rcu(&p->list, &br->port_list);
+ err = br_fdb_insert(br, p, dev->dev_addr);
+ if (err)
+ goto err1;
- spin_lock_bh(&br->lock);
- br_stp_recalculate_bridge_id(br);
- br_features_recompute(br);
- if ((br->dev->flags & IFF_UP)
- && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
- br_stp_enable_port(p);
- spin_unlock_bh(&br->lock);
+ err = br_sysfs_addif(p);
+ if (err)
+ goto err2;
- dev_set_mtu(br->dev, br_min_mtu(br));
- }
+ rcu_assign_pointer(dev->br_port, p);
+ dev_set_promiscuity(dev, 1);
+
+ list_add_rcu(&p->list, &br->port_list);
+ spin_lock_bh(&br->lock);
+ br_stp_recalculate_bridge_id(br);
+ br_features_recompute(br);
+ schedule_delayed_work(&p->carrier_check, BR_PORT_DEBOUNCE);
+ spin_unlock_bh(&br->lock);
+
+ dev_set_mtu(br->dev, br_min_mtu(br));
+ kobject_uevent(&p->kobj, KOBJ_ADD);
+
+ return 0;
+err2:
+ br_fdb_delete_by_port(br, p);
+err1:
+ kobject_del(&p->kobj);
+err0:
+ kobject_put(&p->kobj);
return err;
}
if (!p || p->br != br)
return -EINVAL;
- br_sysfs_removeif(p);
del_nbp(p);
spin_lock_bh(&br->lock);
int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
- struct net_bridge_port *p = skb->dev->br_port;
- struct net_bridge *br = p->br;
+ struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+ struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
int passedup = 0;
+ if (!p || p->state == BR_STATE_DISABLED)
+ goto drop;
+
/* insert into forwarding database after filtering to avoid spoofing */
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+ br = p->br;
+ br_fdb_update(br, p, eth_hdr(skb)->h_source);
- if (p->state == BR_STATE_LEARNING) {
- kfree_skb(skb);
- goto out;
- }
+ if (p->state == BR_STATE_LEARNING)
+ goto drop;
if (br->dev->flags & IFF_PROMISC) {
struct sk_buff *skb2;
out:
return 0;
+drop:
+ kfree_skb(skb);
+ goto out;
}
/*
#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
-#define has_bridge_parent(device) ((device)->br_port != NULL)
-#define bridge_parent(device) ((device)->br_port->br->dev)
-
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables = 1;
.rt_flags = 0,
};
+static inline struct net_device *bridge_parent(const struct net_device *dev)
+{
+ struct net_bridge_port *port = rcu_dereference(dev->br_port);
+
+ return port ? port->br->dev : NULL;
+}
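+/*
+ * bridge_parent() may now return NULL: dev->br_port is cleared by
+ * del_nbp() via rcu_assign_pointer(), and these hooks run in the RCU
+ * packet-receive path, so every caller below must treat a NULL parent
+ * as "port going away" and drop the packet.
+ */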
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
skb->dev = bridge_parent(skb->dev);
- if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
- skb_pull(skb, VLAN_HLEN);
- skb->nh.raw += VLAN_HLEN;
+ if (!skb->dev)
+ kfree_skb(skb);
+ else {
+ if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+ skb_pull(skb, VLAN_HLEN);
+ skb->nh.raw += VLAN_HLEN;
+ }
+ skb->dst->output(skb);
}
- skb->dst->output(skb);
return 0;
}
}
/* Some common code for IPv4/IPv6 */
-static void setup_pre_routing(struct sk_buff *skb)
+static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = bridge_parent(skb->dev);
+
+ return skb->dev;
}
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
nf_bridge_put(skb->nf_bridge);
if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
return NF_DROP;
- setup_pre_routing(skb);
+ if (!setup_pre_routing(skb))
+ return NF_DROP;
NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
nf_bridge_put(skb->nf_bridge);
if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
return NF_DROP;
- setup_pre_routing(skb);
+ if (!setup_pre_routing(skb))
+ return NF_DROP;
store_orig_dstaddr(skb);
NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
struct sk_buff *skb = *pskb;
struct nf_bridge_info *nf_bridge;
struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+ struct net_device *parent;
int pf;
if (!skb->nf_bridge)
return NF_ACCEPT;
+ parent = bridge_parent(out);
+ if (!parent)
+ return NF_DROP;
+
if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
pf = PF_INET;
else
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
- NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in),
- bridge_parent(out), br_nf_forward_finish);
+ NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in), parent,
+ br_nf_forward_finish);
return NF_STOLEN;
}
goto out;
}
realoutdev = bridge_parent(skb->dev);
+ if (!realoutdev)
+ return NF_DROP;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* iptables should match -o br0.x */
/* IP forwarded traffic has a physindev, locally
* generated traffic hasn't. */
if (realindev != NULL) {
- if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) &&
- has_bridge_parent(realindev))
- realindev = bridge_parent(realindev);
+		if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT)) {
+ struct net_device *parent = bridge_parent(realindev);
+ if (parent)
+ realindev = parent;
+ }
NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev,
realoutdev, br_nf_local_out_finish,
if (!nf_bridge)
return NF_ACCEPT;
+ if (!realoutdev)
+ return NF_DROP;
+
if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
pf = PF_INET;
else
/* STP */
u8 priority;
u8 state;
- u8 deleted;
u16 port_no;
unsigned char topology_change_ack;
unsigned char config_pending;
#ifdef CONFIG_SYSFS
/* br_sysfs_if.c */
+extern struct sysfs_ops brport_sysfs_ops;
extern int br_sysfs_addif(struct net_bridge_port *p);
-extern void br_sysfs_removeif(struct net_bridge_port *p);
-extern void br_sysfs_freeif(struct net_bridge_port *p);
/* br_sysfs_br.c */
extern int br_sysfs_addbr(struct net_device *dev);
#else
#define br_sysfs_addif(p) (0)
-#define br_sysfs_removeif(p) do { } while(0)
-#define br_sysfs_freeif(p) kfree(p)
#define br_sysfs_addbr(dev) (0)
#define br_sysfs_delbr(dev) do { } while(0)
#endif /* CONFIG_SYSFS */
static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
-/* NO locks */
+/* NO locks, but rcu_read_lock (preempt_disabled) */
int br_stp_handle_bpdu(struct sk_buff *skb)
{
- struct net_bridge_port *p = skb->dev->br_port;
- struct net_bridge *br = p->br;
+ struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+ struct net_bridge *br;
unsigned char *buf;
+ if (!p)
+ goto err;
+
+ br = p->br;
+ spin_lock(&br->lock);
+
+ if (p->state == BR_STATE_DISABLED || !(br->dev->flags & IFF_UP))
+ goto out;
+
/* insert into forwarding database after filtering to avoid spoofing */
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+ br_fdb_update(br, p, eth_hdr(skb)->h_source);
+
+ if (!br->stp_enabled)
+ goto out;
/* need at least the 802 and STP headers */
if (!pskb_may_pull(skb, sizeof(header)+1) ||
memcmp(skb->data, header, sizeof(header)))
- goto err;
+ goto out;
buf = skb_pull(skb, sizeof(header));
- spin_lock_bh(&br->lock);
- if (p->state == BR_STATE_DISABLED
- || !(br->dev->flags & IFF_UP)
- || !br->stp_enabled)
- goto out;
-
if (buf[0] == BPDU_TYPE_CONFIG) {
struct br_config_bpdu bpdu;
br_received_tcn_bpdu(p);
}
out:
- spin_unlock_bh(&br->lock);
+ spin_unlock(&br->lock);
err:
kfree_skb(skb);
return 0;
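+/*
+ * Note the switch from spin_lock_bh() to plain spin_lock() above: the
+ * handler is now documented as running under rcu_read_lock() from the
+ * receive softirq, where BH is already disabled.
+ */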
return ret;
}
-/* called from kobject_put when port ref count goes to zero. */
-static void brport_release(struct kobject *kobj)
-{
- kfree(container_of(kobj, struct net_bridge_port, kobj));
-}
-
-static struct sysfs_ops brport_sysfs_ops = {
+struct sysfs_ops brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
-static struct kobj_type brport_ktype = {
- .sysfs_ops = &brport_sysfs_ops,
- .release = brport_release,
-};
-
-
/*
* Add sysfs entries to ethernet device added to a bridge.
* Creates a brport subdirectory with bridge attributes.
struct brport_attribute **a;
int err;
- ASSERT_RTNL();
-
- kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
- p->kobj.ktype = &brport_ktype;
- p->kobj.parent = &(p->dev->class_dev.kobj);
- p->kobj.kset = NULL;
-
- err = kobject_add(&p->kobj);
- if(err)
- goto out1;
-
err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj,
SYSFS_BRIDGE_PORT_LINK);
if (err)
goto out2;
}
- err = sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
- if (err)
- goto out2;
-
- kobject_uevent(&p->kobj, KOBJ_ADD);
- return 0;
- out2:
- kobject_del(&p->kobj);
- out1:
+	err = sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
+out2:
return err;
}
-
-void br_sysfs_removeif(struct net_bridge_port *p)
-{
- pr_debug("br_sysfs_removeif\n");
- sysfs_remove_link(&p->br->ifobj, p->dev->name);
- kobject_uevent(&p->kobj, KOBJ_REMOVE);
- kobject_del(&p->kobj);
-}
-
-void br_sysfs_freeif(struct net_bridge_port *p)
-{
- pr_debug("br_sysfs_freeif\n");
- kobject_put(&p->kobj);
-}
if (!skb)
return;
- if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
+ if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change, 0) < 0) {
kfree_skb(skb);
return;
}
if (!skb)
netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS);
- else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
+ else if (inet_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL);
} else {
}
nl->nlmsg_flags = NLM_F_REQUEST;
- nl->nlmsg_pid = current->pid;
+ nl->nlmsg_pid = 0;
nl->nlmsg_seq = 0;
nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
if (cmd == SIOCDELRT) {
tp->rcvq_space.space = space;
- if (sysctl_tcp_moderate_rcvbuf) {
+ if (sysctl_tcp_moderate_rcvbuf &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int new_clamp = space;
/* Receive space grows, normalize in order to
static void irda_device_setup(struct net_device *dev)
{
dev->hard_header_len = 0;
- dev->addr_len = 0;
+ dev->addr_len = LAP_ALEN;
dev->type = ARPHRD_IRDA;
dev->tx_queue_len = 8; /* Window size + 1 s-frame */
- memset(dev->broadcast, 0xff, 4);
+ memset(dev->broadcast, 0xff, LAP_ALEN);
dev->mtu = 2048;
dev->flags = IFF_NOARP;
{
/* Yes !!! Get it.. */
strlcpy(self->rname, discoveries[i].info, sizeof(self->rname));
- self->rname[NICKNAME_MAX_LEN + 1] = '\0';
+ self->rname[sizeof(self->rname) - 1] = '\0';
DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n",
self->daddr, self->rname);
kfree(discoveries);
* 0: continue
* 1: repeat lookup - reference dropped while waiting for socket memory.
*/
-int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
+int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+ long timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
test_bit(0, &nlk->state)) {
DECLARE_WAITQUEUE(wait, current);
if (!timeo) {
- if (!nlk->pid)
+ if (!ssk || nlk_sk(ssk)->pid == 0)
netlink_overrun(sk);
sock_put(sk);
kfree_skb(skb);
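+/*
+ * Only a kernel-side sender (NULL ssk, or an ssk bound as pid 0) is
+ * recorded as an overrun above; an ordinary user-space sender just sees
+ * its non-blocking send fail and may retry, which is why the sending
+ * socket is now threaded through as the new 'ssk' argument.
+ */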
kfree_skb(skb);
return PTR_ERR(sk);
}
- err = netlink_attachskb(sk, skb, nonblock, timeo);
+ err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
if (err == 1)
goto retry;
if (err)