--- ../v2.4.19/linux/include/linux/pkt_cls.h Fri Jun 2 07:25:46 2000 +++ linux/include/linux/pkt_cls.h Wed Jul 31 00:00:59 2002 @@ -158,4 +158,5 @@ #define TCA_TCINDEX_MAX TCA_TCINDEX_POLICE + #endif --- ../v2.4.19/linux/include/linux/pkt_sched.h Fri Jun 2 07:28:24 2000 +++ linux/include/linux/pkt_sched.h Sun Aug 4 14:54:40 2002 @@ -38,9 +38,6 @@ __u32 pps; /* Current flow packet rate */ __u32 qlen; __u32 backlog; -#ifdef __KERNEL__ - spinlock_t *lock; -#endif }; struct tc_estimator @@ -248,6 +245,7 @@ __u8 grio; }; + /* CBQ section */ #define TC_CBQ_MAXPRIO 8 @@ -332,6 +330,7 @@ #define TCA_CBQ_MAX TCA_CBQ_POLICE + /* dsmark section */ enum { @@ -344,19 +343,5 @@ }; #define TCA_DSMARK_MAX TCA_DSMARK_VALUE - -/* ATM section */ - -enum { - TCA_ATM_UNSPEC, - TCA_ATM_FD, /* file/socket descriptor */ - TCA_ATM_PTR, /* pointer to descriptor - later */ - TCA_ATM_HDR, /* LL header */ - TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */ - TCA_ATM_ADDR, /* PVC address (for output only) */ - TCA_ATM_STATE /* VC state (ATM_VS_*; for output only) */ -}; - -#define TCA_ATM_MAX TCA_ATM_STATE #endif --- ../v2.4.19/linux/include/net/pkt_sched.h Sat Aug 3 09:13:50 2002 +++ linux/include/net/pkt_sched.h Wed Aug 7 00:23:36 2002 @@ -7,15 +7,10 @@ #define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES -#include -#include #include +#include #include -#ifdef CONFIG_X86_TSC -#include -#endif - struct rtattr; struct Qdisc; @@ -69,10 +64,16 @@ int (*dump)(struct Qdisc *, struct sk_buff *); }; -extern rwlock_t qdisc_tree_lock; +struct Qdisc_head +{ + struct Qdisc_head *forw; +}; + +extern struct Qdisc_head qdisc_head; struct Qdisc { + struct Qdisc_head h; int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev); struct sk_buff * (*dequeue)(struct Qdisc *dev); unsigned flags; @@ -84,9 +85,11 @@ u32 handle; atomic_t refcnt; struct sk_buff_head q; - struct net_device *dev; + struct device *dev; struct tc_stats stats; + unsigned long tx_timeo; + unsigned long tx_last; int (*reshape_fail)(struct sk_buff 
*skb, struct Qdisc *q); /* This field is deprecated, but it is still used by CBQ @@ -105,28 +108,86 @@ int refcnt; }; +#ifndef MODULE_LICENSE +#define MODULE_LICENSE(X) +#endif + +#ifndef NET_XMIT_SUCCESS +#define NET_XMIT_SUCCESS 0 +#define NET_XMIT_DROP 1 /* skb dropped */ +#define NET_XMIT_CN 2 /* congestion notification */ +#define NET_XMIT_POLICED 3 /* skb is shot by police */ +#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; + (TC use only - dev_queue_xmit + returns this as NET_XMIT_SUCCESS) */ +#endif + +#define likely(e) (e) +#define unlikely(e) (e) + +#ifndef min_t +#define min_t(type,x,y) \ + ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; }) +#define max_t(type,x,y) \ + ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; }) +#endif + +static inline void list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +static inline void __skb_queue_purge(struct sk_buff_head *list) +{ + struct sk_buff *skb; + while ((skb=__skb_dequeue(list))!=NULL) + kfree_skb(skb); +} +#define del_timer_sync(t) del_timer(t) + +#define netif_schedule qdisc_wakeup +#define netif_queue_stopped(D) (D->tbusy) +#ifndef BUG_TRAP +#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); } +#endif + +static inline void IP_ECN_set_ce(struct iphdr *iph) +{ + u32 check = iph->check; + check += __constant_htons(0xFFFE); + iph->check = check + (check>=0xFFFF); + iph->tos |= 1; +} + static inline void sch_tree_lock(struct Qdisc *q) { - write_lock(&qdisc_tree_lock); - spin_lock_bh(&q->dev->queue_lock); + start_bh_atomic(); } static inline void sch_tree_unlock(struct Qdisc *q) { - spin_unlock_bh(&q->dev->queue_lock); - write_unlock(&qdisc_tree_lock); + end_bh_atomic(); } static inline void tcf_tree_lock(struct tcf_proto *tp) { - write_lock(&qdisc_tree_lock); - spin_lock_bh(&tp->q->dev->queue_lock); + wmb(); } static inline void tcf_tree_unlock(struct 
tcf_proto *tp) { - spin_unlock_bh(&tp->q->dev->queue_lock); - write_unlock(&qdisc_tree_lock); + synchronize_bh(); +} + +static inline void sch_dev_queue_lock(struct device *dev) +{ + start_bh_atomic(); +} + +static inline void sch_dev_queue_unlock(struct device *dev) +{ + end_bh_atomic(); } @@ -135,10 +196,10 @@ { unsigned long old_cl; - tcf_tree_lock(tp); old_cl = *clp; + wmb(); *clp = cl; - tcf_tree_unlock(tp); + synchronize_bh(); return old_cl; } @@ -257,7 +318,7 @@ #define PSCHED_GET_TIME(stamp) \ ({ u64 __cur; \ - rdtscll(__cur); \ + __asm__ __volatile__ (".byte 0x0f,0x31" :"=A" (__cur)); \ (stamp) = __cur>>psched_clock_scale; \ }) @@ -389,7 +450,6 @@ u32 toks; u32 ptoks; psched_time_t t_c; - spinlock_t lock; struct qdisc_rate_table *R_tab; struct qdisc_rate_table *P_tab; @@ -402,7 +462,7 @@ extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p); extern int tcf_police(struct sk_buff *skb, struct tcf_police *p); -static inline void tcf_police_release(struct tcf_police *p) +extern __inline__ void tcf_police_release(struct tcf_police *p) { if (p && --p->refcnt == 0) tcf_police_destroy(p); @@ -413,17 +473,19 @@ extern struct Qdisc_ops pfifo_qdisc_ops; extern struct Qdisc_ops bfifo_qdisc_ops; +extern int call_in_ingress(struct sk_buff *skb); + int register_qdisc(struct Qdisc_ops *qops); int unregister_qdisc(struct Qdisc_ops *qops); -struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); -struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); -void dev_init_scheduler(struct net_device *dev); -void dev_shutdown(struct net_device *dev); -void dev_activate(struct net_device *dev); -void dev_deactivate(struct net_device *dev); +struct Qdisc *qdisc_lookup(struct device *dev, u32 handle); +struct Qdisc *qdisc_lookup_class(struct device *dev, u32 handle); +void dev_init_scheduler(struct device *dev); +void dev_shutdown(struct device *dev); +void dev_activate(struct device *dev); +void dev_deactivate(struct device *dev); void 
qdisc_reset(struct Qdisc *qdisc); void qdisc_destroy(struct Qdisc *qdisc); -struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops); +struct Qdisc * qdisc_create_dflt(struct device *dev, struct Qdisc_ops *ops); int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt); void qdisc_kill_estimator(struct tc_stats *stats); struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab); @@ -432,19 +494,21 @@ int tc_filter_init(void); int pktsched_init(void); -extern int qdisc_restart(struct net_device *dev); +void qdisc_run_queues(void); +int qdisc_restart(struct device *dev); -static inline void qdisc_run(struct net_device *dev) +extern __inline__ void qdisc_wakeup(struct device *dev) { - while (!netif_queue_stopped(dev) && - qdisc_restart(dev)<0) - /* NOTHING */; + if (!dev->tbusy) { + struct Qdisc *q = dev->qdisc; + if (qdisc_restart(dev) && q->h.forw == NULL) { + q->h.forw = qdisc_head.forw; + qdisc_head.forw = &q->h; + } + } } -/* Calculate maximal size of packet seen by hard_start_xmit - routine of this device. - */ -static inline unsigned psched_mtu(struct net_device *dev) +extern __inline__ unsigned psched_mtu(struct device *dev) { unsigned mtu = dev->mtu; return dev->hard_header ? mtu + dev->hard_header_len : mtu; --- ../v2.4.19/linux/include/net/pkt_cls.h Tue Oct 17 20:43:53 2000 +++ linux/include/net/pkt_cls.h Thu Aug 1 00:12:52 2002 @@ -63,7 +63,7 @@ specific classifiers. 
*/ -static inline int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) +extern __inline__ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { int err = 0; u32 protocol = skb->protocol; @@ -77,11 +77,7 @@ return -1; } - - extern int register_tcf_proto_ops(struct tcf_proto_ops *ops); extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); - - #endif diff -urN ../v2.4.19/linux/net/sched/Config.in linux/net/sched/Config.in --- ../v2.4.19/linux/net/sched/Config.in Sun Mar 31 03:18:28 2002 +++ linux/net/sched/Config.in Wed Jul 31 00:00:59 2002 @@ -1,40 +1,38 @@ # # Traffic control configuration. # -tristate ' CBQ packet scheduler' CONFIG_NET_SCH_CBQ -tristate ' CSZ packet scheduler' CONFIG_NET_SCH_CSZ -#tristate ' H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ -#tristate ' H-FSC packet scheduler' CONFIG_NET_SCH_HFCS -if [ "$CONFIG_ATM" = "y" ]; then - bool ' ATM pseudo-scheduler' CONFIG_NET_SCH_ATM -fi -tristate ' The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO -tristate ' RED queue' CONFIG_NET_SCH_RED -tristate ' SFQ queue' CONFIG_NET_SCH_SFQ -tristate ' TEQL queue' CONFIG_NET_SCH_TEQL -tristate ' TBF queue' CONFIG_NET_SCH_TBF -tristate ' GRED queue' CONFIG_NET_SCH_GRED -tristate ' Diffserv field marker' CONFIG_NET_SCH_DSMARK -if [ "$CONFIG_NETFILTER" = "y" ]; then - tristate ' Ingress Qdisc' CONFIG_NET_SCH_INGRESS -fi -bool ' QoS support' CONFIG_NET_QOS +define_bool CONFIG_NETLINK y +define_bool CONFIG_RTNETLINK y +tristate 'CBQ packet scheduler' CONFIG_NET_SCH_CBQ +tristate 'CSZ packet scheduler' CONFIG_NET_SCH_CSZ +#tristate 'H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ +#tristate 'H-FSC packet scheduler' CONFIG_NET_SCH_HFCS +tristate 'The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO +tristate 'RED queue' CONFIG_NET_SCH_RED +tristate 'SFQ queue' CONFIG_NET_SCH_SFQ +tristate 'TEQL queue' CONFIG_NET_SCH_TEQL +tristate 'TBF queue' CONFIG_NET_SCH_TBF +tristate 'GRED queue' CONFIG_NET_SCH_GRED 
+tristate 'Diffserv field marker' CONFIG_NET_SCH_DSMARK +tristate 'Ingress Qdisc/policing' CONFIG_NET_SCH_INGRESS + +bool 'QoS support' CONFIG_NET_QOS if [ "$CONFIG_NET_QOS" = "y" ]; then - bool ' Rate estimator' CONFIG_NET_ESTIMATOR + bool 'Rate estimator' CONFIG_NET_ESTIMATOR fi -bool ' Packet classifier API' CONFIG_NET_CLS +bool 'Packet classifier API' CONFIG_NET_CLS if [ "$CONFIG_NET_CLS" = "y" ]; then - tristate ' TC index classifier' CONFIG_NET_CLS_TCINDEX - tristate ' Routing table based classifier' CONFIG_NET_CLS_ROUTE4 - if [ "$CONFIG_NET_CLS_ROUTE4" != "n" ]; then - define_bool CONFIG_NET_CLS_ROUTE y - fi - tristate ' Firewall based classifier' CONFIG_NET_CLS_FW - tristate ' U32 classifier' CONFIG_NET_CLS_U32 - if [ "$CONFIG_NET_QOS" = "y" ]; then - tristate ' Special RSVP classifier' CONFIG_NET_CLS_RSVP - tristate ' Special RSVP classifier for IPv6' CONFIG_NET_CLS_RSVP6 - bool ' Traffic policing (needed for in/egress)' CONFIG_NET_CLS_POLICE - fi + tristate 'TC index classifier' CONFIG_NET_CLS_TCINDEX + tristate 'Routing table based classifier' CONFIG_NET_CLS_ROUTE4 + if [ "$CONFIG_NET_CLS_ROUTE4" != "n" ]; then + define_bool CONFIG_NET_CLS_ROUTE y + fi + tristate 'Firewall based classifier' CONFIG_NET_CLS_FW + tristate 'U32 classifier' CONFIG_NET_CLS_U32 + if [ "$CONFIG_NET_QOS" = "y" ]; then + tristate 'Special RSVP classifier' CONFIG_NET_CLS_RSVP + tristate 'Special RSVP classifier for IPv6' CONFIG_NET_CLS_RSVP6 + bool 'Traffic policing (needed for in/egress)' CONFIG_NET_CLS_POLICE + fi fi diff -urN ../v2.4.19/linux/net/sched/Makefile linux/net/sched/Makefile --- ../v2.4.19/linux/net/sched/Makefile Tue Nov 13 01:24:41 2001 +++ linux/net/sched/Makefile Wed Jul 31 00:00:59 2002 @@ -1,34 +1,178 @@ # # Makefile for the Linux Traffic Control Unit. # +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# +# Note 2! 
The CFLAGS definition is now in the main makefile... O_TARGET := sched.o -obj-y := sch_generic.o +O_OBJS := sch_generic.o +ifeq ($(CONFIG_NET_SCHED), y) -obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o -obj-$(CONFIG_NET_ESTIMATOR) += estimator.o -obj-$(CONFIG_NET_CLS) += cls_api.o -obj-$(CONFIG_NET_CLS_POLICE) += police.o -obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o -obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o -obj-$(CONFIG_NET_SCH_CSZ) += sch_csz.o -obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o -obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o -obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o -obj-$(CONFIG_NET_SCH_RED) += sch_red.o -obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o -obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o -obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o -obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o -obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o -obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o -obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o -obj-$(CONFIG_NET_CLS_U32) += cls_u32.o -obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o -obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o -obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o -obj-$(CONFIG_NET_CLS_FW) += cls_fw.o +O_OBJS += sch_api.o sch_fifo.o + +ifeq ($(CONFIG_NET_ESTIMATOR), y) +O_OBJS += estimator.o +endif + +ifeq ($(CONFIG_NET_CLS), y) +O_OBJS += cls_api.o + +ifeq ($(CONFIG_NET_CLS_POLICE), y) +O_OBJS += police.o +endif + +endif + +ifeq ($(CONFIG_NET_SCH_INGRESS), y) +O_OBJS += sch_ingress.o +else + ifeq ($(CONFIG_NET_SCH_INGRESS), m) + M_OBJS += sch_ingress.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_CBQ), y) +O_OBJS += sch_cbq.o +else + ifeq ($(CONFIG_NET_SCH_CBQ), m) + M_OBJS += sch_cbq.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_CSZ), y) +O_OBJS += sch_csz.o +else + ifeq ($(CONFIG_NET_SCH_CSZ), m) + M_OBJS += sch_csz.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_HPFQ), y) +O_OBJS += sch_hpfq.o +else + ifeq ($(CONFIG_NET_SCH_HPFQ), m) + M_OBJS += sch_hpfq.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_HFSC), y) +O_OBJS += sch_hfsc.o +else + ifeq ($(CONFIG_NET_SCH_HFSC), 
m) + M_OBJS += sch_hfsc.o + endif +endif + + +ifeq ($(CONFIG_NET_SCH_SFQ), y) +O_OBJS += sch_sfq.o +else + ifeq ($(CONFIG_NET_SCH_SFQ), m) + M_OBJS += sch_sfq.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_RED), y) +O_OBJS += sch_red.o +else + ifeq ($(CONFIG_NET_SCH_RED), m) + M_OBJS += sch_red.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_TBF), y) +O_OBJS += sch_tbf.o +else + ifeq ($(CONFIG_NET_SCH_TBF), m) + M_OBJS += sch_tbf.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_PRIO), y) +O_OBJS += sch_prio.o +else + ifeq ($(CONFIG_NET_SCH_PRIO), m) + M_OBJS += sch_prio.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_TEQL), y) +O_OBJS += sch_teql.o +else + ifeq ($(CONFIG_NET_SCH_TEQL), m) + M_OBJS += sch_teql.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_GRED), y) +O_OBJS += sch_gred.o +else + ifeq ($(CONFIG_NET_SCH_GRED), m) + M_OBJS += sch_gred.o + endif +endif + +ifeq ($(CONFIG_NET_SCH_DSMARK), y) +O_OBJS += sch_dsmark.o +else + ifeq ($(CONFIG_NET_SCH_DSMARK), m) + M_OBJS += sch_dsmark.o + endif +endif + +ifeq ($(CONFIG_NET_CLS_TCINDEX), y) +O_OBJS += cls_tcindex.o +else + ifeq ($(CONFIG_NET_CLS_TCINDEX), m) + M_OBJS += cls_tcindex.o + endif +endif + +ifeq ($(CONFIG_NET_CLS_U32), y) +O_OBJS += cls_u32.o +else + ifeq ($(CONFIG_NET_CLS_U32), m) + M_OBJS += cls_u32.o + endif +endif + +ifeq ($(CONFIG_NET_CLS_RSVP), y) +O_OBJS += cls_rsvp.o +else + ifeq ($(CONFIG_NET_CLS_RSVP), m) + M_OBJS += cls_rsvp.o + endif +endif + +ifeq ($(CONFIG_NET_CLS_RSVP6), y) +O_OBJS += cls_rsvp6.o +else + ifeq ($(CONFIG_NET_CLS_RSVP6), m) + M_OBJS += cls_rsvp6.o + endif +endif + +ifeq ($(CONFIG_NET_CLS_ROUTE4), y) +O_OBJS += cls_route.o +else + ifeq ($(CONFIG_NET_CLS_ROUTE4), m) + M_OBJS += cls_route.o + endif +endif + +ifeq ($(CONFIG_NET_CLS_FW), y) +O_OBJS += cls_fw.o +else + ifeq ($(CONFIG_NET_CLS_FW), m) + M_OBJS += cls_fw.o + endif +endif + +endif include $(TOPDIR)/Rules.make diff -urN ../v2.4.19/linux/net/sched/cls_api.c linux/net/sched/cls_api.c --- ../v2.4.19/linux/net/sched/cls_api.c Sun Mar 31 
03:18:28 2002 +++ linux/net/sched/cls_api.c Sun Aug 4 16:57:48 2002 @@ -39,24 +39,20 @@ static struct tcf_proto_ops *tcf_proto_base; -/* Protects list of registered TC modules. It is pure SMP lock. */ -static rwlock_t cls_mod_lock = RW_LOCK_UNLOCKED; /* Find classifier type by string name */ struct tcf_proto_ops * tcf_proto_lookup_ops(struct rtattr *kind) { - struct tcf_proto_ops *t = NULL; + struct tcf_proto_ops *t; if (kind) { - read_lock(&cls_mod_lock); for (t = tcf_proto_base; t; t = t->next) { if (rtattr_strcmp(kind, t->kind) == 0) - break; + return t; } - read_unlock(&cls_mod_lock); } - return t; + return NULL; } /* Register(unregister) new classifier type */ @@ -65,17 +61,12 @@ { struct tcf_proto_ops *t, **tp; - write_lock(&cls_mod_lock); - for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) { - if (strcmp(ops->kind, t->kind) == 0) { - write_unlock(&cls_mod_lock); + for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) + if (strcmp(ops->kind, t->kind) == 0) return -EEXIST; - } - } ops->next = NULL; *tp = ops; - write_unlock(&cls_mod_lock); return 0; } @@ -83,20 +74,18 @@ { struct tcf_proto_ops *t, **tp; - write_lock(&cls_mod_lock); for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) if (t == ops) break; - if (!t) { - write_unlock(&cls_mod_lock); + if (!t) return -ENOENT; - } *tp = t->next; - write_unlock(&cls_mod_lock); return 0; } +#ifdef CONFIG_RTNETLINK + static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, unsigned long fh, int event); @@ -123,7 +112,7 @@ u32 prio = TC_H_MAJ(t->tcm_info); u32 nprio = prio; u32 parent = t->tcm_parent; - struct net_device *dev; + struct device *dev; struct Qdisc *q; struct tcf_proto **back, **chain; struct tcf_proto *tp = NULL; @@ -143,7 +132,7 @@ /* Find head of filter chain. 
*/ /* Find link */ - if ((dev = __dev_get_by_index(t->tcm_ifindex)) == NULL) + if ((dev = dev_get_by_index(t->tcm_ifindex)) == NULL) return -ENODEV; /* Find qdisc */ @@ -228,12 +217,10 @@ kfree(tp); goto errout; } - write_lock(&qdisc_tree_lock); - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); tp->next = *back; *back = tp; - spin_unlock_bh(&dev->queue_lock); - write_unlock(&qdisc_tree_lock); + sch_dev_queue_unlock(dev); } else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind)) goto errout; @@ -241,11 +228,8 @@ if (fh == 0) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { - write_lock(&qdisc_tree_lock); - spin_lock_bh(&dev->queue_lock); *back = tp->next; - spin_unlock_bh(&dev->queue_lock); - write_unlock(&qdisc_tree_lock); + synchronize_bh(); tp->ops->destroy(tp); kfree(tp); @@ -350,7 +334,7 @@ { int t; int s_t; - struct net_device *dev; + struct device *dev; struct Qdisc *q; struct tcf_proto *tp, **chain; struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); @@ -362,17 +346,12 @@ return skb->len; if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) return skb->len; - - read_lock(&qdisc_tree_lock); if (!tcm->tcm_parent) q = dev->qdisc_sleeping; else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); - if (q == NULL) { - read_unlock(&qdisc_tree_lock); - dev_put(dev); + if (q == NULL) return skb->len; - } if ((cops = q->ops->cl_ops) == NULL) goto errout; if (TC_H_MIN(tcm->tcm_parent)) { @@ -423,14 +402,15 @@ if (cl) cops->put(q, cl); - read_unlock(&qdisc_tree_lock); - dev_put(dev); return skb->len; } +#endif -int __init tc_filter_init(void) + +__initfunc(int tc_filter_init(void)) { +#ifdef CONFIG_RTNETLINK struct rtnetlink_link *link_p = rtnetlink_links[PF_UNSPEC]; /* Setup rtnetlink links. 
It is made here to avoid @@ -443,6 +423,7 @@ link_p[RTM_GETTFILTER-RTM_BASE].doit = tc_ctl_tfilter; link_p[RTM_GETTFILTER-RTM_BASE].dumpit = tc_dump_tfilter; } +#endif #define INIT_TC_FILTER(name) { \ extern struct tcf_proto_ops cls_##name##_ops; \ register_tcf_proto_ops(&cls_##name##_ops); \ diff -urN ../v2.4.19/linux/net/sched/cls_fw.c linux/net/sched/cls_fw.c --- ../v2.4.19/linux/net/sched/cls_fw.c Sun Mar 31 03:18:28 2002 +++ linux/net/sched/cls_fw.c Thu Aug 1 00:34:59 2002 @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -65,8 +64,8 @@ { struct fw_head *head = (struct fw_head*)tp->root; struct fw_filter *f; -#ifdef CONFIG_NETFILTER - u32 id = skb->nfmark; +#ifdef CONFIG_IP_FIREWALL + u32 id = skb->fwmark; #else u32 id = 0; #endif @@ -376,4 +375,3 @@ unregister_tcf_proto_ops(&cls_fw_ops); } #endif -MODULE_LICENSE("GPL"); diff -urN ../v2.4.19/linux/net/sched/cls_route.c linux/net/sched/cls_route.c --- ../v2.4.19/linux/net/sched/cls_route.c Sun Mar 31 03:18:28 2002 +++ linux/net/sched/cls_route.c Sun Aug 4 17:08:47 2002 @@ -83,11 +83,11 @@ return id&0xF; } -static void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id) +static void route4_reset_fastmap(struct device *dev, struct route4_head *head, u32 id) { - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); memset(head->fastmap, 0, sizeof(head->fastmap)); - spin_unlock_bh(&dev->queue_lock); + sch_dev_queue_unlock(dev); } static void __inline__ diff -urN ../v2.4.19/linux/net/sched/estimator.c linux/net/sched/estimator.c --- ../v2.4.19/linux/net/sched/estimator.c Fri Jun 2 07:20:30 2000 +++ linux/net/sched/estimator.c Sat Oct 21 12:10:50 2000 @@ -97,38 +97,29 @@ static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1]; -/* Estimator array lock */ -static rwlock_t est_lock = RW_LOCK_UNLOCKED; - static void est_timer(unsigned long arg) { int idx = (int)arg; struct qdisc_estimator *e; - read_lock(&est_lock); for (e = elist[idx].list; e; e = 
e->next) { - struct tc_stats *st = e->stats; - u64 nbytes; - u32 npackets; + u64 nbytes = e->stats->bytes; + u32 npackets = e->stats->packets; u32 rate; - - spin_lock(st->lock); - nbytes = st->bytes; - npackets = st->packets; + rate = (nbytes - e->last_bytes)<<(7 - idx); e->last_bytes = nbytes; e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log; - st->bps = (e->avbps+0xF)>>5; + e->stats->bps = (e->avbps+0xF)>>5; rate = (npackets - e->last_packets)<<(12 - idx); e->last_packets = npackets; e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log; e->stats->pps = (e->avpps+0x1FF)>>10; - spin_unlock(st->lock); } - mod_timer(&elist[idx].timer, jiffies + ((HZ/4)<interval].timer.function = est_timer; add_timer(&elist[est->interval].timer); } - write_lock_bh(&est_lock); elist[est->interval].list = est; - write_unlock_bh(&est_lock); return 0; } @@ -183,9 +172,8 @@ continue; } - write_lock_bh(&est_lock); *pest = est->next; - write_unlock_bh(&est_lock); + synchronize_bh(); kfree(est); killed++; diff -urN ../v2.4.19/linux/net/sched/police.c linux/net/sched/police.c --- ../v2.4.19/linux/net/sched/police.c Sun Mar 31 03:18:28 2002 +++ linux/net/sched/police.c Wed Aug 7 23:28:12 2002 @@ -36,10 +36,6 @@ static u32 idx_gen; static struct tcf_police *tcf_police_ht[16]; -/* Policer hash table lock */ -static rwlock_t police_lock = RW_LOCK_UNLOCKED; - -/* Each policer is serialized by its individual spinlock */ static __inline__ unsigned tcf_police_hash(u32 index) { @@ -50,13 +46,11 @@ { struct tcf_police *p; - read_lock(&police_lock); for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) { if (p->index == index) - break; + return p; } - read_unlock(&police_lock); - return p; + return NULL; } static __inline__ u32 tcf_police_new_index(void) @@ -77,9 +71,8 @@ for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) { if (*p1p == p) { - write_lock_bh(&police_lock); *p1p = p->next; - write_unlock_bh(&police_lock); + synchronize_bh(); #ifdef CONFIG_NET_ESTIMATOR 
qdisc_kill_estimator(&p->stats); #endif @@ -120,8 +113,6 @@ memset(p, 0, sizeof(*p)); p->refcnt = 1; - spin_lock_init(&p->lock); - p->stats.lock = &p->lock; if (parm->rate.rate) { if ((p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1])) == NULL) goto failure; @@ -152,10 +143,10 @@ qdisc_new_estimator(&p->stats, est); #endif h = tcf_police_hash(p->index); - write_lock_bh(&police_lock); p->next = tcf_police_ht[h]; + wmb(); tcf_police_ht[h] = p; - write_unlock_bh(&police_lock); + synchronize_bh(); return p; failure: @@ -171,24 +162,19 @@ long toks; long ptoks = 0; - spin_lock(&p->lock); - p->stats.bytes += skb->len; p->stats.packets++; #ifdef CONFIG_NET_ESTIMATOR if (p->ewma_rate && p->stats.bps >= p->ewma_rate) { p->stats.overlimits++; - spin_unlock(&p->lock); return p->action; } #endif if (skb->len <= p->mtu) { - if (p->R_tab == NULL) { - spin_unlock(&p->lock); + if (p->R_tab == NULL) return p->result; - } PSCHED_GET_TIME(now); @@ -209,16 +195,15 @@ p->t_c = now; p->toks = toks; p->ptoks = ptoks; - spin_unlock(&p->lock); return p->result; } } p->stats.overlimits++; - spin_unlock(&p->lock); return p->action; } +#ifdef CONFIG_RTNETLINK int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) { unsigned char *b = skb->tail; @@ -249,3 +234,5 @@ skb_trim(skb, b - skb->data); return -1; } +#endif + diff -urN ../v2.4.19/linux/net/sched/sch_api.c linux/net/sched/sch_api.c --- ../v2.4.19/linux/net/sched/sch_api.c Sat Aug 3 09:13:57 2002 +++ linux/net/sched/sch_api.c Wed Aug 7 23:27:29 2002 @@ -11,8 +11,10 @@ * Fixes: * * Rani Assaf :980802: JIFFIES and CPU clock sources are repaired. + * J Hadi Salim (hadi@nortelnetworks.com):981128: "Append" message + * * Eduardo J. 
Blanco :990222: kmod support - * Jamal Hadi Salim : 990601: ingress support + * Jamal Hadi Salim : 990501: ingress support */ #include @@ -32,6 +34,7 @@ #include #include #include +#include #include #include @@ -41,10 +44,12 @@ #include #include +#ifdef CONFIG_RTNETLINK static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, struct Qdisc *old, struct Qdisc *new); static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, struct Qdisc *q, unsigned long cl, int event); +#endif /* @@ -127,10 +132,6 @@ changes qdisc parameters. */ -/* Protects list of registered TC modules. It is pure SMP lock. */ -static rwlock_t qdisc_mod_lock = RW_LOCK_UNLOCKED; - - /************************************************ * Queueing disciplines manipulation. * ************************************************/ @@ -146,10 +147,8 @@ { struct Qdisc_ops *q, **qp; - write_lock(&qdisc_mod_lock); for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) { if (strcmp(qops->id, q->id) == 0) { - write_unlock(&qdisc_mod_lock); return -EEXIST; } } @@ -163,7 +162,6 @@ qops->next = NULL; *qp = qops; - write_unlock(&qdisc_mod_lock); return 0; } @@ -172,7 +170,6 @@ struct Qdisc_ops *q, **qp; int err = -ENOENT; - write_lock(&qdisc_mod_lock); for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) if (q == qops) break; @@ -181,7 +178,6 @@ q->next = NULL; err = 0; } - write_unlock(&qdisc_mod_lock); return err; } @@ -189,7 +185,7 @@ (root qdisc, all its children, children of children etc.) 
*/ -struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) +struct Qdisc *qdisc_lookup(struct device *dev, u32 handle) { struct Qdisc *q; @@ -224,12 +220,10 @@ struct Qdisc_ops *q = NULL; if (kind) { - read_lock(&qdisc_mod_lock); for (q = qdisc_base; q; q = q->next) { if (rtattr_strcmp(kind, q->id) == 0) break; } - read_unlock(&qdisc_mod_lock); } return q; } @@ -280,7 +274,7 @@ /* Allocate an unique handle from space managed by kernel */ -u32 qdisc_alloc_handle(struct net_device *dev) +u32 qdisc_alloc_handle(struct device *dev) { int i = 0x10000; static u32 autohandle = TC_H_MAKE(0x80000000U, 0); @@ -297,15 +291,14 @@ /* Attach toplevel qdisc to device dev */ static struct Qdisc * -dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc) +dev_graft_qdisc(struct device *dev, struct Qdisc *qdisc) { struct Qdisc *oqdisc; if (dev->flags & IFF_UP) dev_deactivate(dev); - write_lock(&qdisc_tree_lock); - spin_lock_bh(&dev->queue_lock); + start_bh_atomic(); if (qdisc && qdisc->flags&TCQ_F_INGRES) { oqdisc = dev->qdisc_ingress; /* Prune old scheduler */ @@ -332,8 +325,7 @@ dev->qdisc = &noop_qdisc; } - spin_unlock_bh(&dev->queue_lock); - write_unlock(&qdisc_tree_lock); + end_bh_atomic(); if (dev->flags & IFF_UP) dev_activate(dev); @@ -348,7 +340,7 @@ Old qdisc is not destroyed but returned in *old. 
*/ -int qdisc_graft(struct net_device *dev, struct Qdisc *parent, u32 classid, +int qdisc_graft(struct device *dev, struct Qdisc *parent, u32 classid, struct Qdisc *new, struct Qdisc **old) { int err = 0; @@ -384,7 +376,7 @@ */ static struct Qdisc * -qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp) +qdisc_create(struct device *dev, u32 handle, struct rtattr **tca, int *errp) { int err; struct rtattr *kind = tca[TCA_KIND-1]; @@ -434,7 +426,6 @@ sch->dequeue = ops->dequeue; sch->dev = dev; atomic_set(&sch->refcnt, 1); - sch->stats.lock = &dev->queue_lock; if (handle == 0) { handle = qdisc_alloc_handle(dev); err = -ENOMEM; @@ -448,10 +439,8 @@ sch->handle = handle; if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) { - write_lock(&qdisc_tree_lock); sch->next = dev->qdisc_list; dev->qdisc_list = sch; - write_unlock(&qdisc_tree_lock); #ifdef CONFIG_NET_ESTIMATOR if (tca[TCA_RATE-1]) qdisc_new_estimator(&sch->stats, tca[TCA_RATE-1]); @@ -534,13 +523,13 @@ { struct tcmsg *tcm = NLMSG_DATA(n); struct rtattr **tca = arg; - struct net_device *dev; + struct device *dev; u32 clid = tcm->tcm_parent; struct Qdisc *q = NULL; struct Qdisc *p = NULL; int err; - if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) return -ENODEV; if (clid) { @@ -577,9 +566,9 @@ return err; if (q) { qdisc_notify(skb, n, clid, q, NULL); - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); qdisc_destroy(q); - spin_unlock_bh(&dev->queue_lock); + sch_dev_queue_unlock(dev); } } else { qdisc_notify(skb, n, clid, NULL, q); @@ -595,13 +584,13 @@ { struct tcmsg *tcm = NLMSG_DATA(n); struct rtattr **tca = arg; - struct net_device *dev; + struct device *dev; u32 clid = tcm->tcm_parent; struct Qdisc *q = NULL; struct Qdisc *p = NULL; int err; - if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) return -ENODEV; if (clid) { @@ 
-703,17 +692,17 @@ err = qdisc_graft(dev, p, clid, q, &old_q); if (err) { if (q) { - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); qdisc_destroy(q); - spin_unlock_bh(&dev->queue_lock); + sch_dev_queue_unlock(dev); } return err; } qdisc_notify(skb, n, clid, old_q, q); if (old_q) { - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); qdisc_destroy(old_q); - spin_unlock_bh(&dev->queue_lock); + sch_dev_queue_unlock(dev); } } return 0; @@ -721,13 +710,13 @@ int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st) { - spin_lock_bh(st->lock); - RTA_PUT(skb, TCA_STATS, (char*)&st->lock - (char*)st, st); - spin_unlock_bh(st->lock); + start_bh_atomic(); + RTA_PUT(skb, TCA_STATS, sizeof(*st), st); + end_bh_atomic(); return 0; rtattr_failure: - spin_unlock_bh(st->lock); + end_bh_atomic(); return -1; } @@ -793,34 +782,28 @@ { int idx, q_idx; int s_idx, s_q_idx; - struct net_device *dev; + struct device *dev; struct Qdisc *q; s_idx = cb->args[0]; s_q_idx = q_idx = cb->args[1]; - read_lock(&dev_base_lock); for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { if (idx < s_idx) continue; if (idx > s_idx) s_q_idx = 0; - read_lock(&qdisc_tree_lock); for (q = dev->qdisc_list, q_idx = 0; q; q = q->next, q_idx++) { if (q_idx < s_q_idx) continue; if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) { - read_unlock(&qdisc_tree_lock); goto done; } } - read_unlock(&qdisc_tree_lock); } done: - read_unlock(&dev_base_lock); - cb->args[0] = idx; cb->args[1] = q_idx; @@ -839,7 +822,7 @@ { struct tcmsg *tcm = NLMSG_DATA(n); struct rtattr **tca = arg; - struct net_device *dev; + struct device *dev; struct Qdisc *q = NULL; struct Qdisc_class_ops *cops; unsigned long cl = 0; @@ -849,7 +832,7 @@ u32 qid = TC_H_MAJ(clid); int err; - if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) return -ENODEV; /* @@ -1013,7 +996,7 @@ { int t; int s_t; - struct 
net_device *dev; + struct device *dev; struct Qdisc *q; struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); struct qdisc_dump_args arg; @@ -1025,7 +1008,6 @@ s_t = cb->args[0]; - read_lock(&qdisc_tree_lock); for (q=dev->qdisc_list, t=0; q; q = q->next, t++) { if (t < s_t) continue; if (!q->ops->cl_ops) continue; @@ -1044,14 +1026,20 @@ if (arg.w.stop) break; } - read_unlock(&qdisc_tree_lock); cb->args[0] = t; - dev_put(dev); return skb->len; } +#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE) +int call_in_ingress(struct sk_buff *skb) +{ + if (!skb->dev->qdisc_ingress) return FW_ACCEPT; + return skb->dev->qdisc_ingress->enqueue(skb,skb->dev->qdisc_ingress); +} +#endif + int psched_us_per_tick = 1; int psched_tick_per_us = 1; @@ -1126,7 +1114,7 @@ #endif #if PSCHED_CLOCK_SOURCE == PSCHED_CPU -int __init psched_calibrate_clock(void) +__initfunc(int psched_calibrate_clock(void)) { psched_time_t stamp, stamp1; struct timeval tv, tv1; @@ -1134,16 +1122,19 @@ long rdelay; unsigned long stop; +#if CPU == 586 || CPU == 686 + if (!(boot_cpu_data.x86_capability & X86_FEATURE_TSC)) + return -1; +#endif + #ifdef PSCHED_WATCHER psched_tick(0); #endif stop = jiffies + HZ/10; PSCHED_GET_TIME(stamp); do_gettimeofday(&tv); - while (time_before(jiffies, stop)) { + while (time_before(jiffies, stop)) barrier(); - cpu_relax(); - } PSCHED_GET_TIME(stamp1); do_gettimeofday(&tv1); @@ -1162,9 +1153,12 @@ } #endif -int __init pktsched_init(void) +__initfunc(int pktsched_init(void)) { struct rtnetlink_link *link_p; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *ent; +#endif #if PSCHED_CLOCK_SOURCE == PSCHED_CPU if (psched_calibrate_clock() < 0) @@ -1238,15 +1232,13 @@ #ifdef CONFIG_NET_SCH_PRIO INIT_QDISC(prio); #endif -#ifdef CONFIG_NET_SCH_ATM - INIT_QDISC(atm); -#endif #ifdef CONFIG_NET_CLS tc_filter_init(); #endif #ifdef CONFIG_PROC_FS - create_proc_read_entry("net/psched", 0, 0, psched_read_proc, NULL); + ent = create_proc_entry("net/psched", 0, 0); + 
ent->read_proc = psched_read_proc; #endif return 0; diff -urN ../v2.4.19/linux/net/sched/sch_atm.c linux/net/sched/sch_atm.c --- ../v2.4.19/linux/net/sched/sch_atm.c Sun Mar 31 03:18:28 2002 +++ linux/net/sched/sch_atm.c Thu Jan 1 00:00:00 1970 @@ -1,718 +0,0 @@ -/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */ - -/* Written 1998-2000 by Werner Almesberger, EPFL ICA */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for fput */ -#include -#include - - -extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */ -#define sockfd_put(sock) fput((sock)->file) /* @@@ copied because it's - __inline__ in socket.c */ - - -#if 0 /* control */ -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) -#endif - -#if 0 /* data */ -#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define D2PRINTK(format,args...) -#endif - - -/* - * The ATM queuing discipline provides a framework for invoking classifiers - * (aka "filters"), which in turn select classes of this queuing discipline. - * Each class maps the flow(s) it is handling to a given VC. Multiple classes - * may share the same VC. - * - * When creating a class, VCs are specified by passing the number of the open - * socket descriptor by which the calling process references the VC. The kernel - * keeps the VC open at least until all classes using it are removed. - * - * In this file, most functions are named atm_tc_* to avoid confusion with all - * the atm_* in net/atm. This naming convention differs from what's used in the - * rest of net/sched. - * - * Known bugs: - * - sometimes messes up the IP stack - * - any manipulations besides the few operations described in the README, are - * untested and likely to crash the system - * - should lock the flow while there is data in the queue (?) 
- */ - - -#define PRIV(sch) ((struct atm_qdisc_data *) (sch)->data) -#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back)) - - -struct atm_flow_data { - struct Qdisc *q; /* FIFO, TBF, etc. */ - struct tcf_proto *filter_list; - struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ - void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* chaining */ - struct atm_qdisc_data *parent; /* parent qdisc */ - struct socket *sock; /* for closing */ - u32 classid; /* x:y type ID */ - int ref; /* reference count */ - struct tc_stats stats; - struct atm_flow_data *next; - struct atm_flow_data *excess; /* flow for excess traffic; - NULL to set CLP instead */ - int hdr_len; - unsigned char hdr[0]; /* header data; MUST BE LAST */ -}; - -struct atm_qdisc_data { - struct atm_flow_data link; /* unclassified skbs go here */ - struct atm_flow_data *flows; /* NB: "link" is also on this - list */ - struct tasklet_struct task; /* requeue tasklet */ -}; - - -/* ------------------------- Class/flow operations ------------------------- */ - - -static int find_flow(struct atm_qdisc_data *qdisc,struct atm_flow_data *flow) -{ - struct atm_flow_data *walk; - - DPRINTK("find_flow(qdisc %p,flow %p)\n",qdisc,flow); - for (walk = qdisc->flows; walk; walk = walk->next) - if (walk == flow) return 1; - DPRINTK("find_flow: not found\n"); - return 0; -} - - -static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch, - u32 classid) -{ - struct atm_flow_data *flow; - - for (flow = PRIV(sch)->flows; flow; flow = flow->next) - if (flow->classid == classid) break; - return flow; -} - - -static int atm_tc_graft(struct Qdisc *sch,unsigned long arg, - struct Qdisc *new,struct Qdisc **old) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = (struct atm_flow_data *) arg; - - DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",sch, - p,flow,new,old); - if (!find_flow(p,flow)) return -EINVAL; - if (!new) new = &noop_qdisc; - *old = 
xchg(&flow->q,new); - if (*old) qdisc_reset(*old); - return 0; -} - - -static struct Qdisc *atm_tc_leaf(struct Qdisc *sch,unsigned long cl) -{ - struct atm_flow_data *flow = (struct atm_flow_data *) cl; - - DPRINTK("atm_tc_leaf(sch %p,flow %p)\n",sch,flow); - return flow ? flow->q : NULL; -} - - -static unsigned long atm_tc_get(struct Qdisc *sch,u32 classid) -{ - struct atm_qdisc_data *p __attribute__((unused)) = PRIV(sch); - struct atm_flow_data *flow; - - DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid); - flow = lookup_flow(sch,classid); - if (flow) flow->ref++; - DPRINTK("atm_tc_get: flow %p\n",flow); - return (unsigned long) flow; -} - - -static unsigned long atm_tc_bind_filter(struct Qdisc *sch, - unsigned long parent, u32 classid) -{ - return atm_tc_get(sch,classid); -} - - -static void destroy_filters(struct atm_flow_data *flow) -{ - struct tcf_proto *filter; - - while ((filter = flow->filter_list)) { - DPRINTK("destroy_filters: destroying filter %p\n",filter); - flow->filter_list = filter->next; - filter->ops->destroy(filter); - } -} - - -/* - * atm_tc_put handles all destructions, including the ones that are explicitly - * requested (atm_tc_destroy, etc.). The assumption here is that we never drop - * anything that still seems to be in use. 
- */ - -static void atm_tc_put(struct Qdisc *sch, unsigned long cl) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = (struct atm_flow_data *) cl; - struct atm_flow_data **prev; - - DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); - if (--flow->ref) return; - DPRINTK("atm_tc_put: destroying\n"); - for (prev = &p->flows; *prev; prev = &(*prev)->next) - if (*prev == flow) break; - if (!*prev) { - printk(KERN_CRIT "atm_tc_put: class %p not found\n",flow); - return; - } - *prev = flow->next; - DPRINTK("atm_tc_put: qdisc %p\n",flow->q); - qdisc_destroy(flow->q); - destroy_filters(flow); - if (flow->sock) { - DPRINTK("atm_tc_put: f_count %d\n", - file_count(flow->sock->file)); - flow->vcc->pop = flow->old_pop; - sockfd_put(flow->sock); - } - if (flow->excess) atm_tc_put(sch,(unsigned long) flow->excess); - if (flow != &p->link) kfree(flow); - /* - * If flow == &p->link, the qdisc no longer works at this point and - * needs to be removed. (By the caller of atm_tc_put.) - */ -} - - -static void sch_atm_pop(struct atm_vcc *vcc,struct sk_buff *skb) -{ - struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent; - - D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n",vcc,skb,p); - VCC2FLOW(vcc)->old_pop(vcc,skb); - tasklet_schedule(&p->task); -} - - -static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, - struct rtattr **tca, unsigned long *arg) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = (struct atm_flow_data *) *arg; - struct atm_flow_data *excess = NULL; - struct rtattr *opt = tca[TCA_OPTIONS-1]; - struct rtattr *tb[TCA_ATM_MAX]; - struct socket *sock; - int fd,error,hdr_len; - void *hdr; - - DPRINTK("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x," - "flow %p,opt %p)\n",sch,p,classid,parent,flow,opt); - /* - * The concept of parents doesn't apply for this qdisc. - */ - if (parent && parent != TC_H_ROOT && parent != sch->handle) - return -EINVAL; - /* - * ATM classes cannot be changed. 
In order to change properties of the - * ATM connection, that socket needs to be modified directly (via the - * native ATM API. In order to send a flow to a different VC, the old - * class needs to be removed and a new one added. (This may be changed - * later.) - */ - if (flow) return -EBUSY; - if (opt == NULL || rtattr_parse(tb,TCA_ATM_MAX,RTA_DATA(opt), - RTA_PAYLOAD(opt))) return -EINVAL; - if (!tb[TCA_ATM_FD-1] || RTA_PAYLOAD(tb[TCA_ATM_FD-1]) < sizeof(fd)) - return -EINVAL; - fd = *(int *) RTA_DATA(tb[TCA_ATM_FD-1]); - DPRINTK("atm_tc_change: fd %d\n",fd); - if (tb[TCA_ATM_HDR-1]) { - hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR-1]); - hdr = RTA_DATA(tb[TCA_ATM_HDR-1]); - } - else { - hdr_len = RFC1483LLC_LEN; - hdr = NULL; /* default LLC/SNAP for IP */ - } - if (!tb[TCA_ATM_EXCESS-1]) excess = NULL; - else { - if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS-1]) != sizeof(u32)) - return -EINVAL; - excess = (struct atm_flow_data *) atm_tc_get(sch, - *(u32 *) RTA_DATA(tb[TCA_ATM_EXCESS-1])); - if (!excess) return -ENOENT; - } - DPRINTK("atm_tc_change: type %d, payload %d, hdr_len %d\n", - opt->rta_type,RTA_PAYLOAD(opt),hdr_len); - if (!(sock = sockfd_lookup(fd,&error))) return error; /* f_count++ */ - DPRINTK("atm_tc_change: f_count %d\n",file_count(sock->file)); - if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { - error = -EPROTOTYPE; - goto err_out; - } - /* @@@ should check if the socket is really operational or we'll crash - on vcc->send */ - if (classid) { - if (TC_H_MAJ(classid ^ sch->handle)) { - DPRINTK("atm_tc_change: classid mismatch\n"); - error = -EINVAL; - goto err_out; - } - if (find_flow(p,flow)) { - error = -EEXIST; - goto err_out; - } - } - else { - int i; - unsigned long cl; - - for (i = 1; i < 0x8000; i++) { - classid = TC_H_MAKE(sch->handle,0x8000 | i); - if (!(cl = atm_tc_get(sch,classid))) break; - atm_tc_put(sch,cl); - } - } - DPRINTK("atm_tc_change: new id %x\n",classid); - flow = kmalloc(sizeof(struct 
atm_flow_data)+hdr_len,GFP_KERNEL); - DPRINTK("atm_tc_change: flow %p\n",flow); - if (!flow) { - error = -ENOBUFS; - goto err_out; - } - memset(flow,0,sizeof(*flow)); - flow->filter_list = NULL; - if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops))) - flow->q = &noop_qdisc; - DPRINTK("atm_tc_change: qdisc %p\n",flow->q); - flow->sock = sock; - flow->vcc = ATM_SD(sock); /* speedup */ - flow->vcc->user_back = flow; - DPRINTK("atm_tc_change: vcc %p\n",flow->vcc); - flow->old_pop = flow->vcc->pop; - flow->parent = p; - flow->vcc->pop = sch_atm_pop; - flow->classid = classid; - flow->ref = 1; - flow->excess = excess; - flow->next = p->link.next; - p->link.next = flow; - flow->hdr_len = hdr_len; - if (hdr) memcpy(flow->hdr,hdr,hdr_len); - else { - memcpy(flow->hdr,llc_oui,sizeof(llc_oui)); - ((u16 *) flow->hdr)[3] = htons(ETH_P_IP); - } - *arg = (unsigned long) flow; - return 0; -err_out: - if (excess) atm_tc_put(sch,(unsigned long) excess); - sockfd_put(sock); - return error; -} - - -static int atm_tc_delete(struct Qdisc *sch,unsigned long arg) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = (struct atm_flow_data *) arg; - - DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); - if (!find_flow(PRIV(sch),flow)) return -EINVAL; - if (flow->filter_list || flow == &p->link) return -EBUSY; - /* - * Reference count must be 2: one for "keepalive" (set at class - * creation), and one for the reference held when calling delete. 
- */ - if (flow->ref < 2) { - printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n",flow->ref); - return -EINVAL; - } - if (flow->ref > 2) return -EBUSY; /* catch references via excess, etc.*/ - atm_tc_put(sch,arg); - return 0; -} - - -static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow; - - DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker); - if (walker->stop) return; - for (flow = p->flows; flow; flow = flow->next) { - if (walker->count >= walker->skip) - if (walker->fn(sch,(unsigned long) flow,walker) < 0) { - walker->stop = 1; - break; - } - walker->count++; - } -} - - -static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch,unsigned long cl) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = (struct atm_flow_data *) cl; - - DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); - return flow ? &flow->filter_list : &p->link.filter_list; -} - - -/* --------------------------- Qdisc operations ---------------------------- */ - - -static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = NULL ; /* @@@ */ - struct tcf_result res; - int result; - int ret = NET_XMIT_POLICED; - - D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); - result = TC_POLICE_OK; /* be nice to gcc */ - if (TC_H_MAJ(skb->priority) != sch->handle || - !(flow = (struct atm_flow_data *) atm_tc_get(sch,skb->priority))) - for (flow = p->flows; flow; flow = flow->next) - if (flow->filter_list) { - result = tc_classify(skb,flow->filter_list, - &res); - if (result < 0) continue; - flow = (struct atm_flow_data *) res.class; - if (!flow) flow = lookup_flow(sch,res.classid); - break; - } - if (!flow) flow = &p->link; - else { - if (flow->vcc) - ATM_SKB(skb)->atm_options = flow->vcc->atm_options; - /*@@@ looks good ... 
but it's not supposed to work :-)*/ -#ifdef CONFIG_NET_CLS_POLICE - switch (result) { - case TC_POLICE_SHOT: - kfree_skb(skb); - break; - case TC_POLICE_RECLASSIFY: - if (flow->excess) flow = flow->excess; - else { - ATM_SKB(skb)->atm_options |= - ATM_ATMOPT_CLP; - break; - } - /* fall through */ - case TC_POLICE_OK: - /* fall through */ - default: - break; - } -#endif - } - if ( -#ifdef CONFIG_NET_CLS_POLICE - result == TC_POLICE_SHOT || -#endif - (ret = flow->q->enqueue(skb,flow->q)) != 0) { - sch->stats.drops++; - if (flow) flow->stats.drops++; - return ret; - } - sch->stats.bytes += skb->len; - sch->stats.packets++; - flow->stats.bytes += skb->len; - flow->stats.packets++; - /* - * Okay, this may seem weird. We pretend we've dropped the packet if - * it goes via ATM. The reason for this is that the outer qdisc - * expects to be able to q->dequeue the packet later on if we return - * success at this place. Also, sch->q.qdisc needs to reflect whether - * there is a packet egligible for dequeuing or not. Note that the - * statistics of the outer qdisc are necessarily wrong because of all - * this. There's currently no correct solution for this. - */ - if (flow == &p->link) { - sch->q.qlen++; - return 0; - } - tasklet_schedule(&p->task); - return NET_XMIT_BYPASS; -} - - -/* - * Dequeue packets and send them over ATM. Note that we quite deliberately - * avoid checking net_device's flow control here, simply because sch_atm - * uses its own channels, which have nothing to do with any CLIP/LANE/or - * non-ATM interfaces. - */ - - -static void sch_atm_dequeue(unsigned long data) -{ - struct Qdisc *sch = (struct Qdisc *) data; - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow; - struct sk_buff *skb; - - D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n",sch,p); - for (flow = p->link.next; flow; flow = flow->next) - /* - * If traffic is properly shaped, this won't generate nasty - * little bursts. Otherwise, it may ... 
(but that's okay) - */ - while ((skb = flow->q->dequeue(flow->q))) { - if (!atm_may_send(flow->vcc,skb->truesize)) { - (void) flow->q->ops->requeue(skb,flow->q); - break; - } - D2PRINTK("atm_tc_deqeueue: sending on class %p\n",flow); - /* remove any LL header somebody else has attached */ - skb_pull(skb,(char *) skb->nh.iph-(char *) skb->data); - if (skb_headroom(skb) < flow->hdr_len) { - struct sk_buff *new; - - new = skb_realloc_headroom(skb,flow->hdr_len); - dev_kfree_skb(skb); - if (!new) continue; - skb = new; - } - D2PRINTK("sch_atm_dequeue: ip %p, data %p\n", - skb->nh.iph,skb->data); - ATM_SKB(skb)->vcc = flow->vcc; - memcpy(skb_push(skb,flow->hdr_len),flow->hdr, - flow->hdr_len); - atomic_add(skb->truesize,&flow->vcc->tx_inuse); - ATM_SKB(skb)->iovcnt = 0; - /* atm.atm_options are already set by atm_tc_enqueue */ - (void) flow->vcc->send(flow->vcc,skb); - } -} - - -static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct sk_buff *skb; - - D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n",sch,p); - tasklet_schedule(&p->task); - skb = p->link.q->dequeue(p->link.q); - if (skb) sch->q.qlen--; - return skb; -} - - -static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch) -{ - struct atm_qdisc_data *p = PRIV(sch); - int ret; - - D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); - ret = p->link.q->ops->requeue(skb,p->link.q); - if (!ret) sch->q.qlen++; - else { - sch->stats.drops++; - p->link.stats.drops++; - } - return ret; -} - - -static int atm_tc_drop(struct Qdisc *sch) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow; - - DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n",sch,p); - for (flow = p->flows; flow; flow = flow->next) - if (flow->q->ops->drop && flow->q->ops->drop(flow->q)) - return 1; - return 0; -} - - -static int atm_tc_init(struct Qdisc *sch,struct rtattr *opt) -{ - struct atm_qdisc_data *p = PRIV(sch); - - DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt 
%p)\n",sch,p,opt); - memset(p,0,sizeof(*p)); - p->flows = &p->link; - if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops))) - p->link.q = &noop_qdisc; - DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q); - p->link.filter_list = NULL; - p->link.vcc = NULL; - p->link.sock = NULL; - p->link.classid = sch->handle; - p->link.ref = 1; - p->link.next = NULL; - tasklet_init(&p->task,sch_atm_dequeue,(unsigned long) sch); - MOD_INC_USE_COUNT; - return 0; -} - - -static void atm_tc_reset(struct Qdisc *sch) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow; - - DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n",sch,p); - for (flow = p->flows; flow; flow = flow->next) qdisc_reset(flow->q); - sch->q.qlen = 0; -} - - -static void atm_tc_destroy(struct Qdisc *sch) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow; - - DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n",sch,p); - /* races ? */ - while ((flow = p->flows)) { - destroy_filters(flow); - if (flow->ref > 1) - printk(KERN_ERR "atm_destroy: %p->ref = %d\n",flow, - flow->ref); - atm_tc_put(sch,(unsigned long) flow); - if (p->flows == flow) { - printk(KERN_ERR "atm_destroy: putting flow %p didn't " - "kill it\n",flow); - p->flows = flow->next; /* brute force */ - break; - } - } - tasklet_kill(&p->task); - MOD_DEC_USE_COUNT; -} - - -static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, - struct sk_buff *skb, struct tcmsg *tcm) -{ - struct atm_qdisc_data *p = PRIV(sch); - struct atm_flow_data *flow = (struct atm_flow_data *) cl; - unsigned char *b = skb->tail; - struct rtattr *rta; - - DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", - sch,p,flow,skb,tcm); - if (!find_flow(p,flow)) return -EINVAL; - tcm->tcm_handle = flow->classid; - rta = (struct rtattr *) b; - RTA_PUT(skb,TCA_OPTIONS,0,NULL); - RTA_PUT(skb,TCA_ATM_HDR,flow->hdr_len,flow->hdr); - if (flow->vcc) { - struct sockaddr_atmpvc pvc; - int state; - - pvc.sap_family = AF_ATMPVC; 
- pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; - pvc.sap_addr.vpi = flow->vcc->vpi; - pvc.sap_addr.vci = flow->vcc->vci; - RTA_PUT(skb,TCA_ATM_ADDR,sizeof(pvc),&pvc); - state = ATM_VF2VS(flow->vcc->flags); - RTA_PUT(skb,TCA_ATM_STATE,sizeof(state),&state); - } - if (flow->excess) - RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(u32),&flow->classid); - else { - static u32 zero = 0; - - RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero); - } - rta->rta_len = skb->tail-b; - return skb->len; - -rtattr_failure: - skb_trim(skb,b-skb->data); - return -1; -} - -static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb) -{ - return 0; -} - -static struct Qdisc_class_ops atm_class_ops = -{ - atm_tc_graft, /* graft */ - atm_tc_leaf, /* leaf */ - atm_tc_get, /* get */ - atm_tc_put, /* put */ - atm_tc_change, /* change */ - atm_tc_delete, /* delete */ - atm_tc_walk, /* walk */ - - atm_tc_find_tcf, /* tcf_chain */ - atm_tc_bind_filter, /* bind_tcf */ - atm_tc_put, /* unbind_tcf */ - - atm_tc_dump_class, /* dump */ -}; - -struct Qdisc_ops atm_qdisc_ops = -{ - NULL, /* next */ - &atm_class_ops, /* cl_ops */ - "atm", - sizeof(struct atm_qdisc_data), - - atm_tc_enqueue, /* enqueue */ - atm_tc_dequeue, /* dequeue */ - atm_tc_requeue, /* requeue */ - atm_tc_drop, /* drop */ - - atm_tc_init, /* init */ - atm_tc_reset, /* reset */ - atm_tc_destroy, /* destroy */ - NULL, /* change */ - - atm_tc_dump /* dump */ -}; - - -#ifdef MODULE -int init_module(void) -{ - return register_qdisc(&atm_qdisc_ops); -} - - -void cleanup_module(void) -{ - unregister_qdisc(&atm_qdisc_ops); -} -#endif diff -urN ../v2.4.19/linux/net/sched/sch_cbq.c linux/net/sched/sch_cbq.c --- ../v2.4.19/linux/net/sched/sch_cbq.c Sun Mar 31 03:18:29 2002 +++ linux/net/sched/sch_cbq.c Thu Aug 1 23:43:46 2002 @@ -1436,7 +1436,6 @@ q->link.ewma_log = TC_CBQ_DEF_EWMA; q->link.avpkt = q->link.allot/2; q->link.minidle = -0x7FFFFFFF; - q->link.stats.lock = &sch->dev->queue_lock; init_timer(&q->wd_timer); q->wd_timer.data = 
(unsigned long)sch; @@ -1599,13 +1598,13 @@ if (cbq_dump_attr(skb, &q->link) < 0) goto rtattr_failure; rta->rta_len = skb->tail - b; - spin_lock_bh(&sch->dev->queue_lock); + sch_dev_queue_lock(sch->dev); q->link.xstats.avgidle = q->link.avgidle; if (cbq_copy_xstats(skb, &q->link.xstats)) { - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); goto rtattr_failure; } - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); return skb->len; rtattr_failure: @@ -1637,17 +1636,17 @@ cl->stats.qlen = cl->q->q.qlen; if (qdisc_copy_stats(skb, &cl->stats)) goto rtattr_failure; - spin_lock_bh(&sch->dev->queue_lock); + sch_dev_queue_lock(sch->dev); cl->xstats.avgidle = cl->avgidle; cl->xstats.undertime = 0; if (!PSCHED_IS_PASTPERFECT(cl->undertime)) cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now); q->link.xstats.avgidle = q->link.avgidle; if (cbq_copy_xstats(skb, &cl->xstats)) { - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); goto rtattr_failure; } - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); return skb->len; @@ -1760,10 +1759,10 @@ #ifdef CONFIG_NET_CLS_POLICE struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data; - spin_lock_bh(&sch->dev->queue_lock); + sch_dev_queue_lock(sch->dev); if (q->rx_class == cl) q->rx_class = NULL; - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); #endif cbq_destroy_class(cl); @@ -1926,7 +1925,6 @@ cl->allot = parent->allot; cl->quantum = cl->allot; cl->weight = cl->R_tab->rate.rate; - cl->stats.lock = &sch->dev->queue_lock; sch_tree_lock(sch); cbq_link_class(cl); diff -urN ../v2.4.19/linux/net/sched/sch_csz.c linux/net/sched/sch_csz.c --- ../v2.4.19/linux/net/sched/sch_csz.c Sun Mar 31 03:18:30 2002 +++ linux/net/sched/sch_csz.c Thu Aug 1 23:53:01 2002 @@ -882,7 +882,7 @@ a = &q->flow[cl]; - spin_lock_bh(&sch->dev->queue_lock); + sch_dev_queue_lock(sch->dev); #if 0 a->rate_log = copt->rate_log; #endif @@ 
-896,7 +896,7 @@ if (tb[TCA_CSZ_RTAB-1]) memcpy(a->L_tab, RTA_DATA(tb[TCA_CSZ_RTAB-1]), 1024); - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); return 0; } /* NI */ @@ -917,14 +917,14 @@ a = &q->flow[cl]; - spin_lock_bh(&sch->dev->queue_lock); + sch_dev_queue_lock(sch->dev); a->fprev->fnext = a->fnext; a->fnext->fprev = a->fprev; a->sprev->snext = a->snext; a->snext->sprev = a->sprev; a->start = a->finish = 0; kfree(xchg(&q->flow[cl].L_tab, NULL)); - spin_unlock_bh(&sch->dev->queue_lock); + sch_dev_queue_unlock(sch->dev); return 0; } diff -urN ../v2.4.19/linux/net/sched/sch_generic.c linux/net/sched/sch_generic.c --- ../v2.4.19/linux/net/sched/sch_generic.c Sat Aug 3 09:13:57 2002 +++ linux/net/sched/sch_generic.c Wed Aug 7 23:26:44 2002 @@ -7,8 +7,8 @@ * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, - * Jamal Hadi Salim, 990601 - * - Ingress support + * Jamal Hadi Salim, + * - Ingress support */ #include @@ -34,34 +34,7 @@ /* Main transmission queue. */ -/* Main qdisc structure lock. - - However, modifications - to data, participating in scheduling must be additionally - protected with dev->queue_lock spinlock. - - The idea is the following: - - enqueue, dequeue are serialized via top level device - spinlock dev->queue_lock. - - tree walking is protected by read_lock(qdisc_tree_lock) - and this lock is used only in process context. - - updates to tree are made only under rtnl semaphore, - hence this lock may be made without local bh disabling. - - qdisc_tree_lock must be grabbed BEFORE dev->queue_lock! - */ -rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED; - -/* - dev->queue_lock serializes queue accesses for this device - AND dev->qdisc pointer itself. - - dev->xmit_lock serializes accesses to device driver. - - dev->queue_lock and dev->xmit_lock are mutually exclusive, - if one is grabbed, another must be free. - */ - +struct Qdisc_head qdisc_head = { &qdisc_head }; /* Kick device. 
Note, that this procedure can be called by a watchdog timer, so that @@ -71,62 +44,26 @@ >0 - queue is not empty, but throttled. <0 - queue is not empty. Device is throttled, if dev->tbusy != 0. - NOTE: Called under dev->queue_lock with locally disabled BH. + NOTE: Called only from NET BH */ -int qdisc_restart(struct net_device *dev) +int qdisc_restart(struct device *dev) { struct Qdisc *q = dev->qdisc; struct sk_buff *skb; - /* Dequeue packet */ if ((skb = q->dequeue(q)) != NULL) { - if (spin_trylock(&dev->xmit_lock)) { - /* Remember that the driver is grabbed by us. */ - dev->xmit_lock_owner = smp_processor_id(); - - /* And release queue */ - spin_unlock(&dev->queue_lock); - - if (!netif_queue_stopped(dev)) { - if (netdev_nit) - dev_queue_xmit_nit(skb, dev); - - if (dev->hard_start_xmit(skb, dev) == 0) { - dev->xmit_lock_owner = -1; - spin_unlock(&dev->xmit_lock); - - spin_lock(&dev->queue_lock); - return -1; - } - } + if (netdev_nit) + dev_queue_xmit_nit(skb, dev); - /* Release the driver */ - dev->xmit_lock_owner = -1; - spin_unlock(&dev->xmit_lock); - spin_lock(&dev->queue_lock); - q = dev->qdisc; - } else { - /* So, someone grabbed the driver. */ - - /* It may be transient configuration error, - when hard_start_xmit() recurses. We detect - it by checking xmit owner and drop the - packet when deadloop is detected. - */ - if (dev->xmit_lock_owner == smp_processor_id()) { - kfree_skb(skb); - if (net_ratelimit()) - printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); - return -1; - } - netdev_rx_stat[smp_processor_id()].cpu_collision++; + if (dev->hard_start_xmit(skb, dev) == 0) { + q->tx_last = jiffies; + return -1; } /* Device kicked us out :( This is possible in three cases: - 0. driver is locked 1. fastroute is enabled 2. device cannot determine busy state before start of transmission (f.e. 
dialout) @@ -134,66 +71,74 @@ */ q->ops->requeue(skb, q); - netif_schedule(dev); - return 1; + return -1; } return q->q.qlen; } -static void dev_watchdog(unsigned long arg) +/* Scan transmission queue and kick devices. + + Deficiency: slow devices (ppp) and fast ones (100Mb ethernet) + share one queue. This means that if we have a lot of loaded ppp channels, + we will scan a long list on every 100Mb EOI. + I have no idea how to solve it using only "anonymous" Linux mark_bh(). + + To change queue from device interrupt? Ough... only not this... + */ + +void qdisc_run_queues(void) { - struct net_device *dev = (struct net_device *)arg; + struct Qdisc_head **hp, *h; - spin_lock(&dev->xmit_lock); - if (dev->qdisc != &noop_qdisc) { - if (netif_device_present(dev) && - netif_running(dev) && - netif_carrier_ok(dev)) { - if (netif_queue_stopped(dev) && - (jiffies - dev->trans_start) > dev->watchdog_timeo) { - printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); - dev->tx_timeout(dev); - } - if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) - dev_hold(dev); + hp = &qdisc_head.forw; + while ((h = *hp) != &qdisc_head) { + int res = -1; + struct Qdisc *q = (struct Qdisc*)h; + struct device *dev = q->dev; + + while (!dev->tbusy && (res = qdisc_restart(dev)) < 0) + /* NOTHING */; + + /* An explanation is necessary here. + qdisc_restart called dev->hard_start_xmit, + if device is virtual, it could trigger one more + dev_queue_xmit and a new device could appear + in the active chain. In this case we cannot unlink + the empty queue, because we lost the back pointer. + No problem, we will unlink it during the next round. 
+ */ + + if (res == 0 && *hp == h) { + *hp = h->forw; + h->forw = NULL; + continue; + } + hp = &h->forw; } - spin_unlock(&dev->xmit_lock); - - dev_put(dev); } -static void dev_watchdog_init(struct net_device *dev) -{ - init_timer(&dev->watchdog_timer); - dev->watchdog_timer.data = (unsigned long)dev; - dev->watchdog_timer.function = dev_watchdog; -} +/* Periodic watchdog timer to recover from hard/soft device bugs. */ + +static void dev_do_watchdog(unsigned long dummy); + +static struct timer_list dev_watchdog = + { NULL, NULL, 0L, 0L, &dev_do_watchdog }; -void __netdev_watchdog_up(struct net_device *dev) +static void dev_do_watchdog(unsigned long dummy) { - if (dev->tx_timeout) { - if (dev->watchdog_timeo <= 0) - dev->watchdog_timeo = 5*HZ; - if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) - dev_hold(dev); + struct Qdisc_head *h; + + for (h = qdisc_head.forw; h != &qdisc_head; h = h->forw) { + struct Qdisc *q = (struct Qdisc*)h; + struct device *dev = q->dev; + if (dev->tbusy && jiffies - q->tx_last > q->tx_timeo) + qdisc_restart(dev); } + dev_watchdog.expires = jiffies + 5*HZ; + add_timer(&dev_watchdog); } -static void dev_watchdog_up(struct net_device *dev) -{ - spin_lock_bh(&dev->xmit_lock); - __netdev_watchdog_up(dev); - spin_unlock_bh(&dev->xmit_lock); -} -static void dev_watchdog_down(struct net_device *dev) -{ - spin_lock_bh(&dev->xmit_lock); - if (del_timer(&dev->watchdog_timer)) - __dev_put(dev); - spin_unlock_bh(&dev->xmit_lock); -} /* "NOOP" scheduler: the best scheduler, recommended for all interfaces under all circumstances. 
It is difficult to invent anything faster or @@ -236,6 +181,7 @@ struct Qdisc noop_qdisc = { + { NULL }, noop_enqueue, noop_dequeue, TCQ_F_BUILTIN, @@ -258,6 +204,7 @@ struct Qdisc noqueue_qdisc = { + { NULL }, NULL, noop_dequeue, TCQ_F_BUILTIN, @@ -360,7 +307,7 @@ pfifo_fast_reset, }; -struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops) +struct Qdisc * qdisc_create_dflt(struct device *dev, struct Qdisc_ops *ops) { struct Qdisc *sch; int size = sizeof(*sch) + ops->priv_size; @@ -375,7 +322,6 @@ sch->enqueue = ops->enqueue; sch->dequeue = ops->dequeue; sch->dev = dev; - sch->stats.lock = &dev->queue_lock; atomic_set(&sch->refcnt, 1); if (!ops->init || ops->init(sch, NULL) == 0) return sch; @@ -384,8 +330,6 @@ return NULL; } -/* Under dev->queue_lock and BH! */ - void qdisc_reset(struct Qdisc *qdisc) { struct Qdisc_ops *ops = qdisc->ops; @@ -394,12 +338,10 @@ ops->reset(qdisc); } -/* Under dev->queue_lock and BH! */ - void qdisc_destroy(struct Qdisc *qdisc) { struct Qdisc_ops *ops = qdisc->ops; - struct net_device *dev; + struct device *dev; if (!atomic_dec_and_test(&qdisc->refcnt)) return; @@ -429,7 +371,7 @@ } -void dev_activate(struct net_device *dev) +void dev_activate(struct device *dev) { /* No queueing discipline is attached to device; create default one i.e. 
pfifo_fast for devices, @@ -448,73 +390,67 @@ } else { qdisc = &noqueue_qdisc; } - write_lock(&qdisc_tree_lock); dev->qdisc_sleeping = qdisc; - write_unlock(&qdisc_tree_lock); } - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) { - dev->trans_start = jiffies; - dev_watchdog_up(dev); + dev->qdisc->tx_timeo = 5*HZ; + dev->qdisc->tx_last = jiffies - dev->qdisc->tx_timeo; + if (!del_timer(&dev_watchdog)) + dev_watchdog.expires = jiffies + 5*HZ; + add_timer(&dev_watchdog); } - spin_unlock_bh(&dev->queue_lock); + sch_dev_queue_unlock(dev); } -void dev_deactivate(struct net_device *dev) +void dev_deactivate(struct device *dev) { struct Qdisc *qdisc; - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); + qdisc = dev->qdisc; dev->qdisc = &noop_qdisc; qdisc_reset(qdisc); - spin_unlock_bh(&dev->queue_lock); + if (qdisc->h.forw) { + struct Qdisc_head **hp, *h; - dev_watchdog_down(dev); - - while (test_bit(__LINK_STATE_SCHED, &dev->state)) { - current->policy |= SCHED_YIELD; - schedule(); + for (hp = &qdisc_head.forw; (h = *hp) != &qdisc_head; hp = &h->forw) { + if (h == &qdisc->h) { + *hp = h->forw; + h->forw = NULL; + break; + } + } } - spin_unlock_wait(&dev->xmit_lock); + sch_dev_queue_unlock(dev); } -void dev_init_scheduler(struct net_device *dev) +void dev_init_scheduler(struct device *dev) { - write_lock(&qdisc_tree_lock); - spin_lock_bh(&dev->queue_lock); dev->qdisc = &noop_qdisc; - spin_unlock_bh(&dev->queue_lock); dev->qdisc_sleeping = &noop_qdisc; dev->qdisc_list = NULL; - write_unlock(&qdisc_tree_lock); - - dev_watchdog_init(dev); } -void dev_shutdown(struct net_device *dev) +void dev_shutdown(struct device *dev) { struct Qdisc *qdisc; - write_lock(&qdisc_tree_lock); - spin_lock_bh(&dev->queue_lock); + sch_dev_queue_lock(dev); qdisc = dev->qdisc_sleeping; dev->qdisc = &noop_qdisc; dev->qdisc_sleeping = &noop_qdisc; qdisc_destroy(qdisc); -#if defined(CONFIG_NET_SCH_INGRESS) || 
defined(CONFIG_NET_SCH_INGRESS_MODULE) if ((qdisc = dev->qdisc_ingress) != NULL) { dev->qdisc_ingress = NULL; qdisc_destroy(qdisc); } -#endif BUG_TRAP(dev->qdisc_list == NULL); - BUG_TRAP(!timer_pending(&dev->watchdog_timer)); dev->qdisc_list = NULL; - spin_unlock_bh(&dev->queue_lock); - write_unlock(&qdisc_tree_lock); + sch_dev_queue_unlock(dev); } diff -urN ../v2.4.19/linux/net/sched/sch_ingress.c linux/net/sched/sch_ingress.c --- ../v2.4.19/linux/net/sched/sch_ingress.c Sat Aug 3 09:13:57 2002 +++ linux/net/sched/sch_ingress.c Sun Aug 4 18:02:57 2002 @@ -1,29 +1,17 @@ -/* net/sched/sch_ingress.c - Ingress qdisc - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * Authors: Jamal Hadi Salim 1999 - */ +/* net/sched/sch_ingress.c - Ingress qdisc */ + +/* Written 1999 by Jamal Hadi Salim */ + #include #include #include #include -#include +#include /* for pkt_sched */ #include -#include -#include +#include #include #include -#include -#include -#include -#include -#include -#include - #undef DEBUG_INGRESS @@ -43,9 +31,6 @@ #define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data) -/* Thanks to Doron Oz for this hack -*/ -static int nf_registered = 0; struct ingress_qdisc_data { struct Qdisc *q; @@ -149,7 +134,7 @@ #ifdef CONFIG_NET_CLS_POLICE switch (result) { case TC_POLICE_SHOT: - result = NF_DROP; + result = FW_BLOCK; sch->stats.drops++; break; case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? 
*/ @@ -158,7 +143,7 @@ default: sch->stats.packets++; sch->stats.bytes += skb->len; - result = NF_ACCEPT; + result = FW_ACCEPT; break; }; #else @@ -199,68 +184,17 @@ return 0; } -static unsigned int -ing_hook(unsigned int hook, struct sk_buff **pskb, - const struct net_device *indev, - const struct net_device *outdev, - int (*okfn)(struct sk_buff *)) -{ - - struct Qdisc *q; - struct sk_buff *skb = *pskb; - struct net_device *dev = skb->dev; - int fwres=NF_ACCEPT; - - DPRINTK("ing_hook: skb %s dev=%s len=%u\n", - skb->sk ? "(owned)" : "(unowned)", - skb->dev ? (*pskb)->dev->name : "(no dev)", - skb->len); - -/* -revisit later: Use a private since lock dev->queue_lock is also -used on the egress (might slow things for an iota) -*/ - - if (dev->qdisc_ingress) { - spin_lock(&dev->queue_lock); - if ((q = dev->qdisc_ingress) != NULL) - fwres = q->enqueue(skb, q); - spin_unlock(&dev->queue_lock); - } - - return fwres; -} - -/* after ipt_filter */ -static struct nf_hook_ops ing_ops = -{ - { NULL, NULL}, - ing_hook, - PF_INET, - NF_IP_PRE_ROUTING, - NF_IP_PRI_FILTER + 1 -}; int ingress_init(struct Qdisc *sch,struct rtattr *opt) { struct ingress_qdisc_data *p = PRIV(sch); - if (!nf_registered) { - if (nf_register_hook(&ing_ops) < 0) { - printk("ingress qdisc registration error \n"); - goto error; - } - nf_registered++; - } - DPRINTK("ingress_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt); memset(p, 0, sizeof(*p)); p->filter_list = NULL; p->q = &noop_qdisc; MOD_INC_USE_COUNT; return 0; -error: - return -EINVAL; } @@ -280,10 +214,6 @@ */ } -/* ------------------------------------------------------------- */ - - -/* ------------------------------------------------------------- */ static void ingress_destroy(struct Qdisc *sch) { @@ -379,8 +309,5 @@ void cleanup_module(void) { unregister_qdisc(&ingress_qdisc_ops); - if (nf_registered) - nf_unregister_hook(&ing_ops); } #endif -MODULE_LICENSE("GPL"); diff -urN ../v2.4.19/linux/net/sched/sch_red.c linux/net/sched/sch_red.c --- 
../v2.4.19/linux/net/sched/sch_red.c Sun Mar 31 03:18:30 2002 +++ linux/net/sched/sch_red.c Fri Aug 2 02:41:03 2002 @@ -39,7 +39,6 @@ #include #include #include -#include #define RED_ECN_ECT 0x02 #define RED_ECN_CE 0x01 diff -urN ../v2.4.19/linux/net/sched/sch_teql.c linux/net/sched/sch_teql.c --- ../v2.4.19/linux/net/sched/sch_teql.c Tue Nov 13 01:29:33 2001 +++ linux/net/sched/sch_teql.c Sun Aug 4 18:06:40 2002 @@ -67,9 +67,10 @@ struct teql_master { struct Qdisc_ops qops; - struct net_device dev; + struct device dev; struct Qdisc *slaves; struct net_device_stats stats; + char name[IFNAMSIZ]; }; struct teql_sched_data @@ -89,7 +90,7 @@ static int teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) { - struct net_device *dev = sch->dev; + struct device *dev = sch->dev; struct teql_sched_data *q = (struct teql_sched_data *)sch->data; __skb_queue_tail(&q->q, skb); @@ -122,10 +123,11 @@ skb = __skb_dequeue(&dat->q); if (skb == NULL) { - struct net_device *m = dat->m->dev.qdisc->dev; + struct device *m = dat->m->dev.qdisc->dev; if (m) { + m->tbusy = 0; dat->m->slaves = sch; - netif_wake_queue(m); + qdisc_restart(m); } } sch->q.qlen = dat->q.qlen + dat->m->dev.qdisc->q.qlen; @@ -165,9 +167,9 @@ master->slaves = NEXT_SLAVE(q); if (q == master->slaves) { master->slaves = NULL; - spin_lock_bh(&master->dev.queue_lock); + sch_dev_queue_lock(&master->dev); qdisc_reset(master->dev.qdisc); - spin_unlock_bh(&master->dev.queue_lock); + sch_dev_queue_unlock(&master->dev); } } skb_queue_purge(&dat->q); @@ -183,7 +185,7 @@ static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt) { - struct net_device *dev = sch->dev; + struct device *dev = sch->dev; struct teql_master *m = (struct teql_master*)sch->ops; struct teql_sched_data *q = (struct teql_sched_data *)sch->data; @@ -230,7 +232,7 @@ /* "teql*" netdevice routines */ static int -__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) +__teql_resolve(struct sk_buff *skb, struct sk_buff 
*skb_res, struct device *dev) { struct teql_sched_data *q = (void*)dev->qdisc->data; struct neighbour *mn = skb->dst->neighbour; @@ -242,16 +244,12 @@ memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) { atomic_inc(&n->refcnt); } else { - n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev); - if (IS_ERR(n)) - return PTR_ERR(n); + n = __neigh_lookup(mn->tbl, mn->primary_key, dev, 1); + if (n == NULL) + return -ENOBUFS; } if (neigh_event_send(n, skb_res) == 0) { - int err; - read_lock(&n->lock); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len); - read_unlock(&n->lock); - if (err < 0) { + if (dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len) < 0) { neigh_release(n); return -EINVAL; } @@ -263,7 +261,7 @@ } static __inline__ int -teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) +teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct device *dev) { if (dev->hard_header == NULL || skb->dst == NULL || @@ -272,7 +270,7 @@ return __teql_resolve(skb, skb_res, dev); } -static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) +static int teql_master_xmit(struct sk_buff *skb, struct device *dev) { struct teql_master *master = (void*)dev->priv; struct Qdisc *start, *q; @@ -281,6 +279,8 @@ int len = skb->len; struct sk_buff *skb_res = NULL; + dev->tbusy = 1; + start = master->slaves; restart: @@ -291,37 +291,35 @@ goto drop; do { - struct net_device *slave = q->dev; + struct device *slave = q->dev; if (slave->qdisc_sleeping != q) continue; - if (netif_queue_stopped(slave) || ! 
netif_running(slave)) { + if (slave->tbusy) { busy = 1; continue; } + if (q->h.forw == NULL) { + q->h.forw = qdisc_head.forw; + qdisc_head.forw = &q->h; + } + switch (teql_resolve(skb, skb_res, slave)) { case 0: - if (spin_trylock(&slave->xmit_lock)) { - slave->xmit_lock_owner = smp_processor_id(); - if (!netif_queue_stopped(slave) && - slave->hard_start_xmit(skb, slave) == 0) { - slave->xmit_lock_owner = -1; - spin_unlock(&slave->xmit_lock); - master->slaves = NEXT_SLAVE(q); - netif_wake_queue(dev); - master->stats.tx_packets++; - master->stats.tx_bytes += len; + if (slave->hard_start_xmit(skb, slave) == 0) { + master->slaves = NEXT_SLAVE(q); + dev->tbusy = 0; + master->stats.tx_packets++; + master->stats.tx_bytes += len; return 0; - } - slave->xmit_lock_owner = -1; - spin_unlock(&slave->xmit_lock); } - if (netif_queue_stopped(dev)) + if (dev->tbusy) busy = 1; break; case 1: master->slaves = NEXT_SLAVE(q); + dev->tbusy = 0; return 0; default: nores = 1; @@ -335,10 +333,9 @@ goto restart; } - if (busy) { - netif_stop_queue(dev); + dev->tbusy = busy; + if (busy) return 1; - } master->stats.tx_errors++; drop: @@ -347,7 +344,7 @@ return 0; } -static int teql_master_open(struct net_device *dev) +static int teql_master_open(struct device *dev) { struct Qdisc * q; struct teql_master *m = (void*)dev->priv; @@ -361,7 +358,7 @@ q = m->slaves; do { - struct net_device *slave = q->dev; + struct device *slave = q->dev; if (slave == NULL) return -EUNATCH; @@ -385,25 +382,24 @@ m->dev.mtu = mtu; m->dev.flags = (m->dev.flags&~FMASK) | flags; - netif_start_queue(&m->dev); + m->dev.tbusy = 0; MOD_INC_USE_COUNT; return 0; } -static int teql_master_close(struct net_device *dev) +static int teql_master_close(struct device *dev) { - netif_stop_queue(dev); MOD_DEC_USE_COUNT; return 0; } -static struct net_device_stats *teql_master_stats(struct net_device *dev) +static struct net_device_stats *teql_master_stats(struct device *dev) { struct teql_master *m = (void*)dev->priv; return 
&m->stats; } -static int teql_master_mtu(struct net_device *dev, int new_mtu) +static int teql_master_mtu(struct device *dev, int new_mtu) { struct teql_master *m = (void*)dev->priv; struct Qdisc *q; @@ -423,14 +419,14 @@ return 0; } -static int teql_master_init(struct net_device *dev) +static int teql_master_init(struct device *dev) { dev->open = teql_master_open; dev->hard_start_xmit = teql_master_xmit; dev->stop = teql_master_close; dev->get_stats = teql_master_stats; dev->change_mtu = teql_master_mtu; - dev->type = ARPHRD_VOID; + dev->type = 0; dev->mtu = 1500; dev->tx_queue_len = 100; dev->flags = IFF_NOARP; @@ -460,7 +456,7 @@ #ifdef MODULE int init_module(void) #else -int __init teql_init(void) +__initfunc(int teql_init(void)) #endif { int err; @@ -468,10 +464,11 @@ rtnl_lock(); the_master.dev.priv = (void*)&the_master; + the_master.dev.name = (void*)&the_master.name; err = dev_alloc_name(&the_master.dev, "teql%d"); if (err < 0) return err; - memcpy(the_master.qops.id, the_master.dev.name, IFNAMSIZ); + memcpy(the_master.qops.id, the_master.name, IFNAMSIZ); the_master.dev.init = teql_master_init; err = register_netdevice(&the_master.dev); @@ -493,4 +490,3 @@ rtnl_unlock(); } #endif -MODULE_LICENSE("GPL");