/*
 *	NET3	Protocol independent device support routines.
 */
#ifdef DDE_LINUX
#include "local.h"
#include <l4/dde/linux26/dde26_net.h>
#endif

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 * The list of packet types we will receive (as opposed to discard) and
 * the routines to invoke.  ptype_base is a hash over the protocol value;
 * ETH_P_ALL taps live on the separate ptype_all list.  Insertions and
 * removals take ptype_lock; readers walk the lists under RCU.
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.  Pure readers hold dev_base_lock for reading; writers must
 * hold the rtnl semaphore and take dev_base_lock for writing when they
 * do the actual updates.  This allows pure readers to access the list
 * while a writer continues to update it.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU(struct softnet_data, softnet_data);
00255
00256 #ifdef CONFIG_LOCKDEP
00257
00258
00259
00260
00261 static const unsigned short netdev_lock_type[] =
00262 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
00263 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
00264 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
00265 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
00266 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
00267 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
00268 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
00269 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
00270 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
00271 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
00272 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
00273 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
00274 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
00275 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
00276 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
00277
00278 static const char *netdev_lock_name[] =
00279 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
00280 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
00281 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
00282 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
00283 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
00284 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
00285 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
00286 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
00287 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
00288 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
00289 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
00290 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
00291 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
00292 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
00293 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
00294
00295 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
00296 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
00297
00298 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
00299 {
00300 int i;
00301
00302 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
00303 if (netdev_lock_type[i] == dev_type)
00304 return i;
00305
00306 return ARRAY_SIZE(netdev_lock_type) - 1;
00307 }
00308
00309 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
00310 unsigned short dev_type)
00311 {
00312 int i;
00313
00314 i = netdev_lock_pos(dev_type);
00315 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
00316 netdev_lock_name[i]);
00317 }
00318
00319 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
00320 {
00321 int i;
00322
00323 i = netdev_lock_pos(dev->type);
00324 lockdep_set_class_and_name(&dev->addr_list_lock,
00325 &netdev_addr_lock_key[i],
00326 netdev_lock_name[i]);
00327 }
00328 #else
00329 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
00330 unsigned short dev_type)
00331 {
00332 }
00333 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
00334 {
00335 }
00336 #endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

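/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */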
void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
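
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */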
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
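
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */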
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.  The found settings are
 *	set for the device to be used later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

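/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */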
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
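
/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */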
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
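
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */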
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
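
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */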
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
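
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count
 *	increased and the caller must therefore be careful about locking.
 */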
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
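
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */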
struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
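
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */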
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
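
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */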
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other
		 * "%" characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
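
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Returns the number of the unit assigned or a negative errno code.
 */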
int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
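
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */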
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
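
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */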
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
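
/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */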
void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
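
/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */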
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
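
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */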
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;
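
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */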
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	prefetch(&dev->netdev_ops->ndo_start_xmit);
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return ops->ndo_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto = 0;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
			ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = simple_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
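
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 *	I notice this method can also return errors from the queue
 *	disciplines, including NET_XMIT_DROP, which is a positive value.
 *	So, errors can also be positive.
 *
 *	Regardless of the return value, the skb is consumed, so it is
 *	currently difficult to retry a send to this method.  (You can bump
 *	the ref count before sending to hold a reference for retry if you
 *	are careful.)
 *
 *	When calling this method, interrupts MUST be enabled.  This is
 *	because the BH enable code must have IRQs enabled so that it will
 *	not deadlock.
 */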
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;		/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
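
/**
 *	netif_rx - post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */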
int netif_rx(struct sk_buff *skb)
{
#ifndef DDE_LINUX
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
#else /* DDE_LINUX */
	/* Hand the packet to the DDE RX callback. */
	return l4dde26_do_rx_callback(skb);
#endif
}

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks are defined here for ATM LANE */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we dont have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesnt stop any functionality; if you dont have
 * the ingress scheduler, you just cant add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
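
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */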
int netif_receive_skb(struct sk_buff *skb)
{
#ifndef DDE_LINUX
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
#else /* DDE_LINUX */
	/* Hand the packet to the DDE RX callback. */
	return l4dde26_do_rx_callback(skb);
#endif
}

/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	skb_shinfo(skb)->gso_size = 0;
	__skb_push(skb, -skb_network_offset(skb));
	return netif_receive_skb(skb);
}

void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int count = 0;
	int same_flow;
	int mac_len;
	int free;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		struct sk_buff *p;

		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_reset_network_header(skb);
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		for (p = napi->gro_list; p; p = p->next) {
			count++;

			if (!NAPI_GRO_CB(p)->same_flow)
				continue;

			if (p->mac_len != mac_len ||
			    memcmp(skb_mac_header(p), skb_mac_header(skb),
				   mac_len))
				NAPI_GRO_CB(p)->same_flow = 0;
		}

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	free = NAPI_GRO_CB(skb)->free;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
		__skb_push(skb, -skb_network_offset(skb));
		goto normal;
	}

	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb->len;
	skb->next = napi->gro_list;
	napi->gro_list = skb;

ok:
	return free;

normal:
	return -1;
}
EXPORT_SYMBOL(dev_gro_receive);

static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow = 1;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	switch (__napi_gro_receive(napi, skb)) {
	case -1:
		return netif_receive_skb(skb);

	case 1:
		kfree_skb(skb);
		break;
	}

	return NET_RX_SUCCESS;
}
EXPORT_SYMBOL(napi_gro_receive);

void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));

	napi->skb = skb;
}
EXPORT_SYMBOL(napi_reuse_skb);

struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
				  struct napi_gro_fraginfo *info)
{
	struct net_device *dev = napi->dev;
	struct sk_buff *skb = napi->skb;

	napi->skb = NULL;

	if (!skb) {
		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
		if (!skb)
			goto out;

		skb_reserve(skb, NET_IP_ALIGN);
	}

	BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
	skb_shinfo(skb)->nr_frags = info->nr_frags;
	memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));

	skb->data_len = info->len;
	skb->len += info->len;
	skb->truesize += info->len;

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		napi_reuse_skb(napi, skb);
		skb = NULL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, dev);

	skb->ip_summed = info->ip_summed;
	skb->csum = info->csum;

out:
	return skb;
}
EXPORT_SYMBOL(napi_fraginfo_skb);

int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
{
	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
	int err = NET_RX_DROP;

	if (!skb)
		goto out;

	if (netpoll_receive_skb(skb))
		goto out;

	err = NET_RX_SUCCESS;

	switch (__napi_gro_receive(napi, skb)) {
	case -1:
		return netif_receive_skb(skb);

	case 0:
		goto out;
	}

	napi_reuse_skb(napi, skb);

out:
	return err;
}
EXPORT_SYMBOL(napi_gro_frags);

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			local_irq_enable();
			napi_complete(napi);
			goto out;
		}
		local_irq_enable();

		napi_gro_receive(napi, skb);
	} while (++work < quota && jiffies == start_time);

	napi_gro_flush(napi);

out:
	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case its running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	kfree_skb(napi->skb);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(netif_napi_del);
02703
02704
02705 static void net_rx_action(struct softirq_action *h)
02706 {
02707 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
02708 unsigned long time_limit = jiffies + 2;
02709 int budget = netdev_budget;
02710 void *have;
02711
02712 local_irq_disable();
02713
02714 while (!list_empty(list)) {
02715 struct napi_struct *n;
02716 int work, weight;
02717
02718 /* If the softirq window is exhausted then punt.
02719  * Allow this to run for 2 jiffies, which allows
02720  * an average latency of 1.5/HZ.
02721  */
02722 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
02723 goto softnet_break;
02724
02725 local_irq_enable();
02726
02727 /* Even though interrupts have been re-enabled, this
02728  * access is safe because interrupts can only add new
02729  * entries to the tail of this list, and only ->poll()
02730  * calls can remove this head entry from the list.
02731  */
02732 n = list_entry(list->next, struct napi_struct, poll_list);
02733
02734 have = netpoll_poll_lock(n);
02735
02736 weight = n->weight;
02737
02738 /* This NAPI_STATE_SCHED test is for avoiding a race
02739  * with netpoll's poll_napi().  Only the entity which
02740  * obtains the lock and sees NAPI_STATE_SCHED set will
02741  * actually make the ->poll() call.  Therefore we avoid
02742  * accepting a device with an empty poll list.
02743  */
02744 work = 0;
02745 if (test_bit(NAPI_STATE_SCHED, &n->state))
02746 work = n->poll(n, weight);
02747
02748 WARN_ON_ONCE(work > weight);
02749
02750 budget -= work;
02751
02752 local_irq_disable();
02753
02754 /* Drivers must not modify the NAPI state if they
02755  * consume the entire weight.  In such cases this code
02756  * still "owns" the NAPI instance and therefore can
02757  * move the instance around on the list at-will.
02758  */
02759 if (unlikely(work == weight)) {
02760 if (unlikely(napi_disable_pending(n)))
02761 __napi_complete(n);
02762 else
02763 list_move_tail(&n->poll_list, list);
02764 }
02765
02766 netpoll_poll_unlock(have);
02767 }
02768 out:
02769 local_irq_enable();
02770
02771 #ifdef CONFIG_NET_DMA
02772 /*
02773  * There may not be any more sk_buffs coming right now, so push
02774  * any pending DMA copies to hardware.
02775  */
02776 dma_issue_pending_all();
02777 #endif
02778
02779 return;
02780
02781 softnet_break:
02782 __get_cpu_var(netdev_rx_stat).time_squeeze++;
02783 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
02784 goto out;
02785 }
02786
02787 static gifconf_func_t *gifconf_list[NPROTO];
02788
02789 /**
02790  * register_gifconf - register a SIOCGIF handler
02791  * @family: Address family
02792  * @gifconf: Function handler
02793  *
02794  * Register protocol dependent address dumping routines. The handler
02795  * that is passed must not be freed or reused until it has been
02796  * replaced by another handler.
02797  */
02798 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
02799 {
02800 if (family >= NPROTO)
02801 return -EINVAL;
02802 gifconf_list[family] = gifconf;
02803 return 0;
02804 }
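/*
 * For instance, the IPv4 stack registers its handler roughly as follows
 * (sketch; see the address-family code for the real call site):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */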
02805
02806 /*
02807  * Map an interface index to its name (SIOCGIFNAME)
02808  */
02809
02810 /*
02811  * We need this ioctl for efficient implementation of the
02812  * if_indextoname() function required by the IPv6 API.  Without
02813  * it, we would have to search all the interfaces to find a
02814  * match.  --pb
02815  */
02816
02817
02818 static int dev_ifname(struct net *net, struct ifreq __user *arg)
02819 {
02820 struct net_device *dev;
02821 struct ifreq ifr;
02822
02823 /*
02824  * Fetch the caller's info block.
02825  */
02826
02827 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
02828 return -EFAULT;
02829
02830 read_lock(&dev_base_lock);
02831 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
02832 if (!dev) {
02833 read_unlock(&dev_base_lock);
02834 return -ENODEV;
02835 }
02836
02837 strcpy(ifr.ifr_name, dev->name);
02838 read_unlock(&dev_base_lock);
02839
02840 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
02841 return -EFAULT;
02842 return 0;
02843 }
02844
02845 /*
02846  * Perform a SIOCGIFCONF call. Walks the device list and lets each
02847  * registered gifconf handler write its interface records into the
02848  * user buffer; with a NULL buffer it only computes the length
02849  * that would be needed.
02850  */
02851 static int dev_ifconf(struct net *net, char __user *arg)
02852 {
02853 struct ifconf ifc;
02854 struct net_device *dev;
02855 char __user *pos;
02856 int len;
02857 int total;
02858 int i;
02859
02860 /*
02861  * Fetch the caller's info block.
02862  */
02863
02864 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
02865 return -EFAULT;
02866
02867 pos = ifc.ifc_buf;
02868 len = ifc.ifc_len;
02869
02870 /*
02871  * Loop over the interfaces, and write an info block for each.
02872  */
02873
02874 total = 0;
02875 for_each_netdev(net, dev) {
02876 for (i = 0; i < NPROTO; i++) {
02877 if (gifconf_list[i]) {
02878 int done;
02879 if (!pos)
02880 done = gifconf_list[i](dev, NULL, 0);
02881 else
02882 done = gifconf_list[i](dev, pos + total,
02883 len - total);
02884 if (done < 0)
02885 return -EFAULT;
02886 total += done;
02887 }
02888 }
02889 }
02890 /*
02891  * All done.  Write the updated control block back to the caller.
02892  */
02893
02894 ifc.ifc_len = total;
02895
02896 /*
02897  * Both BSD and Solaris return 0 here, so we do too.
02898  */
02899 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
02900 }
02901
02902 #ifdef CONFIG_PROC_FS
02903 /*
02904  * This is invoked by the /proc filesystem handler to display a
02905  * device in detail.
02906  */
02907 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
02908 __acquires(dev_base_lock)
02909 {
02910 struct net *net = seq_file_net(seq);
02911 loff_t off;
02912 struct net_device *dev;
02913
02914 read_lock(&dev_base_lock);
02915 if (!*pos)
02916 return SEQ_START_TOKEN;
02917
02918 off = 1;
02919 for_each_netdev(net, dev)
02920 if (off++ == *pos)
02921 return dev;
02922
02923 return NULL;
02924 }
02925
02926 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
02927 {
02928 struct net *net = seq_file_net(seq);
02929 ++*pos;
02930 return v == SEQ_START_TOKEN ?
02931 first_net_device(net) : next_net_device((struct net_device *)v);
02932 }
02933
02934 void dev_seq_stop(struct seq_file *seq, void *v)
02935 __releases(dev_base_lock)
02936 {
02937 read_unlock(&dev_base_lock);
02938 }
02939
02940 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
02941 {
02942 const struct net_device_stats *stats = dev_get_stats(dev);
02943
02944 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
02945 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
02946 dev->name, stats->rx_bytes, stats->rx_packets,
02947 stats->rx_errors,
02948 stats->rx_dropped + stats->rx_missed_errors,
02949 stats->rx_fifo_errors,
02950 stats->rx_length_errors + stats->rx_over_errors +
02951 stats->rx_crc_errors + stats->rx_frame_errors,
02952 stats->rx_compressed, stats->multicast,
02953 stats->tx_bytes, stats->tx_packets,
02954 stats->tx_errors, stats->tx_dropped,
02955 stats->tx_fifo_errors, stats->collisions,
02956 stats->tx_carrier_errors +
02957 stats->tx_aborted_errors +
02958 stats->tx_window_errors +
02959 stats->tx_heartbeat_errors,
02960 stats->tx_compressed);
02961 }
02962
02963 /*
02964  * Called from the PROCfs module. This now uses the new arbitrary
02965  * sized /proc/net interface to create /proc/net/dev.
02966  */
02967 static int dev_seq_show(struct seq_file *seq, void *v)
02968 {
02969 if (v == SEQ_START_TOKEN)
02970 seq_puts(seq, "Inter-| Receive "
02971 " | Transmit\n"
02972 " face |bytes packets errs drop fifo frame "
02973 "compressed multicast|bytes packets errs "
02974 "drop fifo colls carrier compressed\n");
02975 else
02976 dev_seq_printf_stats(seq, v);
02977 return 0;
02978 }
02979
02980 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
02981 {
02982 struct netif_rx_stats *rc = NULL;
02983
02984 while (*pos < nr_cpu_ids)
02985 if (cpu_online(*pos)) {
02986 rc = &per_cpu(netdev_rx_stat, *pos);
02987 break;
02988 } else
02989 ++*pos;
02990 return rc;
02991 }
02992
02993 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
02994 {
02995 return softnet_get_online(pos);
02996 }
02997
02998 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
02999 {
03000 ++*pos;
03001 return softnet_get_online(pos);
03002 }
03003
03004 static void softnet_seq_stop(struct seq_file *seq, void *v)
03005 {
03006 }
03007
03008 static int softnet_seq_show(struct seq_file *seq, void *v)
03009 {
03010 struct netif_rx_stats *s = v;
03011
03012 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
03013 s->total, s->dropped, s->time_squeeze, 0,
03014 0, 0, 0, 0,
03015 s->cpu_collision);
03016 return 0;
03017 }
03018
03019 static const struct seq_operations dev_seq_ops = {
03020 .start = dev_seq_start,
03021 .next = dev_seq_next,
03022 .stop = dev_seq_stop,
03023 .show = dev_seq_show,
03024 };
03025
03026 static int dev_seq_open(struct inode *inode, struct file *file)
03027 {
03028 return seq_open_net(inode, file, &dev_seq_ops,
03029 sizeof(struct seq_net_private));
03030 }
03031
03032 static const struct file_operations dev_seq_fops = {
03033 .owner = THIS_MODULE,
03034 .open = dev_seq_open,
03035 .read = seq_read,
03036 .llseek = seq_lseek,
03037 .release = seq_release_net,
03038 };
03039
03040 static const struct seq_operations softnet_seq_ops = {
03041 .start = softnet_seq_start,
03042 .next = softnet_seq_next,
03043 .stop = softnet_seq_stop,
03044 .show = softnet_seq_show,
03045 };
03046
03047 static int softnet_seq_open(struct inode *inode, struct file *file)
03048 {
03049 return seq_open(file, &softnet_seq_ops);
03050 }
03051
03052 static const struct file_operations softnet_seq_fops = {
03053 .owner = THIS_MODULE,
03054 .open = softnet_seq_open,
03055 .read = seq_read,
03056 .llseek = seq_lseek,
03057 .release = seq_release,
03058 };
03059
03060 static void *ptype_get_idx(loff_t pos)
03061 {
03062 struct packet_type *pt = NULL;
03063 loff_t i = 0;
03064 int t;
03065
03066 list_for_each_entry_rcu(pt, &ptype_all, list) {
03067 if (i == pos)
03068 return pt;
03069 ++i;
03070 }
03071
03072 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
03073 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
03074 if (i == pos)
03075 return pt;
03076 ++i;
03077 }
03078 }
03079 return NULL;
03080 }
03081
03082 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
03083 __acquires(RCU)
03084 {
03085 rcu_read_lock();
03086 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
03087 }
03088
03089 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
03090 {
03091 struct packet_type *pt;
03092 struct list_head *nxt;
03093 int hash;
03094
03095 ++*pos;
03096 if (v == SEQ_START_TOKEN)
03097 return ptype_get_idx(0);
03098
03099 pt = v;
03100 nxt = pt->list.next;
03101 if (pt->type == htons(ETH_P_ALL)) {
03102 if (nxt != &ptype_all)
03103 goto found;
03104 hash = 0;
03105 nxt = ptype_base[0].next;
03106 } else
03107 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
03108
03109 while (nxt == &ptype_base[hash]) {
03110 if (++hash >= PTYPE_HASH_SIZE)
03111 return NULL;
03112 nxt = ptype_base[hash].next;
03113 }
03114 found:
03115 return list_entry(nxt, struct packet_type, list);
03116 }
03117
03118 static void ptype_seq_stop(struct seq_file *seq, void *v)
03119 __releases(RCU)
03120 {
03121 rcu_read_unlock();
03122 }
03123
03124 static int ptype_seq_show(struct seq_file *seq, void *v)
03125 {
03126 struct packet_type *pt = v;
03127
03128 if (v == SEQ_START_TOKEN)
03129 seq_puts(seq, "Type Device Function\n");
03130 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
03131 if (pt->type == htons(ETH_P_ALL))
03132 seq_puts(seq, "ALL ");
03133 else
03134 seq_printf(seq, "%04x", ntohs(pt->type));
03135
03136 seq_printf(seq, " %-8s %pF\n",
03137 pt->dev ? pt->dev->name : "", pt->func);
03138 }
03139
03140 return 0;
03141 }
03142
03143 static const struct seq_operations ptype_seq_ops = {
03144 .start = ptype_seq_start,
03145 .next = ptype_seq_next,
03146 .stop = ptype_seq_stop,
03147 .show = ptype_seq_show,
03148 };
03149
03150 static int ptype_seq_open(struct inode *inode, struct file *file)
03151 {
03152 return seq_open_net(inode, file, &ptype_seq_ops,
03153 sizeof(struct seq_net_private));
03154 }
03155
03156 static const struct file_operations ptype_seq_fops = {
03157 .owner = THIS_MODULE,
03158 .open = ptype_seq_open,
03159 .read = seq_read,
03160 .llseek = seq_lseek,
03161 .release = seq_release_net,
03162 };
03163
03164
03165 static int __net_init dev_proc_net_init(struct net *net)
03166 {
03167 int rc = -ENOMEM;
03168
03169 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
03170 goto out;
03171 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
03172 goto out_dev;
03173 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
03174 goto out_softnet;
03175
03176 if (wext_proc_init(net))
03177 goto out_ptype;
03178 rc = 0;
03179 out:
03180 return rc;
03181 out_ptype:
03182 proc_net_remove(net, "ptype");
03183 out_softnet:
03184 proc_net_remove(net, "softnet_stat");
03185 out_dev:
03186 proc_net_remove(net, "dev");
03187 goto out;
03188 }
03189
03190 static void __net_exit dev_proc_net_exit(struct net *net)
03191 {
03192 wext_proc_exit(net);
03193
03194 proc_net_remove(net, "ptype");
03195 proc_net_remove(net, "softnet_stat");
03196 proc_net_remove(net, "dev");
03197 }
03198
03199 static struct pernet_operations __net_initdata dev_proc_ops = {
03200 .init = dev_proc_net_init,
03201 .exit = dev_proc_net_exit,
03202 };
03203
03204 static int __init dev_proc_init(void)
03205 {
03206 return register_pernet_subsys(&dev_proc_ops);
03207 }
03208 #else
03209 #define dev_proc_init() 0
03210 #endif
03211
03212 /**
03213  * netdev_set_master - set up master/slave pair
03214  * @slave: slave device
03215  * @master: new master device
03216  *
03217  * Changes the master device of the slave. Pass %NULL to break the
03218  * bonding. The caller must hold the RTNL semaphore. On a failure
03219  * a negative errno code is returned. On success the reference
03220  * counts are adjusted, %RTM_NEWLINK is sent to the routing socket
03221  * and the function returns zero.
03222  */
03223
03224 int netdev_set_master(struct net_device *slave, struct net_device *master)
03225 {
03226 struct net_device *old = slave->master;
03227
03228 ASSERT_RTNL();
03229
03230 if (master) {
03231 if (old)
03232 return -EBUSY;
03233 dev_hold(master);
03234 }
03235
03236 slave->master = master;
03237
03238 synchronize_net();
03239
03240 if (old)
03241 dev_put(old);
03242
03243 if (master)
03244 slave->flags |= IFF_SLAVE;
03245 else
03246 slave->flags &= ~IFF_SLAVE;
03247
03248 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
03249 return 0;
03250 }
03251
03252 static void dev_change_rx_flags(struct net_device *dev, int flags)
03253 {
03254 const struct net_device_ops *ops = dev->netdev_ops;
03255
03256 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
03257 ops->ndo_change_rx_flags(dev, flags);
03258 }
03259
03260 static int __dev_set_promiscuity(struct net_device *dev, int inc)
03261 {
03262 unsigned short old_flags = dev->flags;
03263 uid_t uid;
03264 gid_t gid;
03265
03266 ASSERT_RTNL();
03267
03268 dev->flags |= IFF_PROMISC;
03269 dev->promiscuity += inc;
03270 if (dev->promiscuity == 0) {
03271 /*
03272  * Avoid overflow.
03273  * If inc causes overflow, untouch promisc and return error.
03274  */
03275 if (inc < 0)
03276 dev->flags &= ~IFF_PROMISC;
03277 else {
03278 dev->promiscuity -= inc;
03279 printk(KERN_WARNING "%s: promiscuity touches roof, "
03280 "set promiscuity failed, promiscuity feature "
03281 "of device might be broken.\n", dev->name);
03282 return -EOVERFLOW;
03283 }
03284 }
03285 if (dev->flags != old_flags) {
03286 printk(KERN_INFO "device %s %s promiscuous mode\n",
03287 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
03288 "left");
03289 if (audit_enabled) {
03290 current_uid_gid(&uid, &gid);
03291 audit_log(current->audit_context, GFP_ATOMIC,
03292 AUDIT_ANOM_PROMISCUOUS,
03293 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
03294 dev->name, (dev->flags & IFF_PROMISC),
03295 (old_flags & IFF_PROMISC),
03296 audit_get_loginuid(current),
03297 uid, gid,
03298 audit_get_sessionid(current));
03299 }
03300
03301 dev_change_rx_flags(dev, IFF_PROMISC);
03302 }
03303 return 0;
03304 }
03305
03306 /**
03307  * dev_set_promiscuity - update promiscuity count on a device
03308  * @dev: device
03309  * @inc: modifier
03310  *
03311  * Add or remove promiscuity from a device. While the count in the
03312  * device remains above zero the interface remains promiscuous. Once
03313  * it hits zero the device reverts back to normal filtering
03314  * operation. A negative inc value is used to drop promiscuity on
03315  * the device.
03316  */
03317 int dev_set_promiscuity(struct net_device *dev, int inc)
03318 {
03319 unsigned short old_flags = dev->flags;
03320 int err;
03321
03322 err = __dev_set_promiscuity(dev, inc);
03323 if (err < 0)
03324 return err;
03325 if (dev->flags != old_flags)
03326 dev_set_rx_mode(dev);
03327 return err;
03328 }
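/*
 * Usage sketch (illustrative; "dev" is assumed to be a held reference).
 * A component that needs to see all traffic on a port bumps the counter
 * while active and drops it symmetrically when done, under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// enter promiscuous mode
 *	...
 *	dev_set_promiscuity(dev, -1);		// leave it again
 *	rtnl_unlock();
 *
 * dev_set_allmulti() below follows the same counted +1/-1 convention.
 */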
03329
03330 /**
03331  * dev_set_allmulti - update allmulti count on a device
03332  * @dev: device
03333  * @inc: modifier
03334  *
03335  * Add or remove reception of all multicast frames to a device.
03336  * While the count in the device remains above zero the interface
03337  * remains listening to all interfaces. Once it hits zero the device
03338  * reverts back to normal filtering operation. A negative @inc value
03339  * is used to drop the counter when releasing a resource needing all
03340  * multicasts.
03341  */
03342
03343 int dev_set_allmulti(struct net_device *dev, int inc)
03344 {
03345 unsigned short old_flags = dev->flags;
03346
03347 ASSERT_RTNL();
03348
03349 dev->flags |= IFF_ALLMULTI;
03350 dev->allmulti += inc;
03351 if (dev->allmulti == 0) {
03352 /*
03353  * Avoid overflow.
03354  * If inc causes overflow, untouch allmulti and return error.
03355  */
03356 if (inc < 0)
03357 dev->flags &= ~IFF_ALLMULTI;
03358 else {
03359 dev->allmulti -= inc;
03360 printk(KERN_WARNING "%s: allmulti touches roof, "
03361 "set allmulti failed, allmulti feature of "
03362 "device might be broken.\n", dev->name);
03363 return -EOVERFLOW;
03364 }
03365 }
03366 if (dev->flags ^ old_flags) {
03367 dev_change_rx_flags(dev, IFF_ALLMULTI);
03368 dev_set_rx_mode(dev);
03369 }
03370 return 0;
03371 }
03372
03373 /*
03374  * Upload unicast and multicast address lists to device and
03375  * configure RX filtering. When the device doesn't support unicast
03376  * filtering it is put in promiscuous mode while unicast addresses
03377  * are present.
03378  */
03379 void __dev_set_rx_mode(struct net_device *dev)
03380 {
03381 const struct net_device_ops *ops = dev->netdev_ops;
03382
03383 /* dev_open() will call this function so the list will stay sane. */
03384 if (!(dev->flags&IFF_UP))
03385 return;
03386
03387 if (!netif_device_present(dev))
03388 return;
03389
03390 if (ops->ndo_set_rx_mode)
03391 ops->ndo_set_rx_mode(dev);
03392 else {
03393 /* Unicast address changes may only happen under the rtnl,
03394  * therefore calling __dev_set_promiscuity() here is safe.
03395  */
03396 if (dev->uc_count > 0 && !dev->uc_promisc) {
03397 __dev_set_promiscuity(dev, 1);
03398 dev->uc_promisc = 1;
03399 } else if (dev->uc_count == 0 && dev->uc_promisc) {
03400 __dev_set_promiscuity(dev, -1);
03401 dev->uc_promisc = 0;
03402 }
03403
03404 if (ops->ndo_set_multicast_list)
03405 ops->ndo_set_multicast_list(dev);
03406 }
03407 }
03408
03409 void dev_set_rx_mode(struct net_device *dev)
03410 {
03411 netif_addr_lock_bh(dev);
03412 __dev_set_rx_mode(dev);
03413 netif_addr_unlock_bh(dev);
03414 }
03415
03416 int __dev_addr_delete(struct dev_addr_list **list, int *count,
03417 void *addr, int alen, int glbl)
03418 {
03419 struct dev_addr_list *da;
03420
03421 for (; (da = *list) != NULL; list = &da->next) {
03422 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
03423 alen == da->da_addrlen) {
03424 if (glbl) {
03425 int old_glbl = da->da_gusers;
03426 da->da_gusers = 0;
03427 if (old_glbl == 0)
03428 break;
03429 }
03430 if (--da->da_users)
03431 return 0;
03432
03433 *list = da->next;
03434 kfree(da);
03435 (*count)--;
03436 return 0;
03437 }
03438 }
03439 return -ENOENT;
03440 }
03441
03442 int __dev_addr_add(struct dev_addr_list **list, int *count,
03443 void *addr, int alen, int glbl)
03444 {
03445 struct dev_addr_list *da;
03446
03447 for (da = *list; da != NULL; da = da->next) {
03448 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
03449 da->da_addrlen == alen) {
03450 if (glbl) {
03451 int old_glbl = da->da_gusers;
03452 da->da_gusers = 1;
03453 if (old_glbl)
03454 return 0;
03455 }
03456 da->da_users++;
03457 return 0;
03458 }
03459 }
03460
03461 da = kzalloc(sizeof(*da), GFP_ATOMIC);
03462 if (da == NULL)
03463 return -ENOMEM;
03464 memcpy(da->da_addr, addr, alen);
03465 da->da_addrlen = alen;
03466 da->da_users = 1;
03467 da->da_gusers = glbl ? 1 : 0;
03468 da->next = *list;
03469 *list = da;
03470 (*count)++;
03471 return 0;
03472 }
03473
03474 /**
03475  * dev_unicast_delete - release secondary unicast address
03476  * @dev: device
03477  * @addr: address to delete
03478  * @alen: length of @addr
03479  *
03480  * Release reference to a secondary unicast address and remove it
03481  * from the device if the reference count drops to zero.
03482  *
03483  * The caller must hold the rtnl_mutex.
03484  */
03485 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
03486 {
03487 int err;
03488
03489 ASSERT_RTNL();
03490
03491 netif_addr_lock_bh(dev);
03492 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
03493 if (!err)
03494 __dev_set_rx_mode(dev);
03495 netif_addr_unlock_bh(dev);
03496 return err;
03497 }
03498 EXPORT_SYMBOL(dev_unicast_delete);
03499
03500 /**
03501  * dev_unicast_add - add a secondary unicast address
03502  * @dev: device
03503  * @addr: address to add
03504  * @alen: length of @addr
03505  *
03506  * Add a secondary unicast address to the device or increase
03507  * the reference count if it already exists.
03508  *
03509  * The caller must hold the rtnl_mutex.
03510  */
03511 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
03512 {
03513 int err;
03514
03515 ASSERT_RTNL();
03516
03517 netif_addr_lock_bh(dev);
03518 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
03519 if (!err)
03520 __dev_set_rx_mode(dev);
03521 netif_addr_unlock_bh(dev);
03522 return err;
03523 }
03524 EXPORT_SYMBOL(dev_unicast_add);
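/*
 * Usage sketch (illustrative; lower_dev/addr are assumed to come from the
 * caller). A virtual device that wants frames for an extra MAC address on
 * its lower device would, with RTNL held:
 *
 *	err = dev_unicast_add(lower_dev, addr, ETH_ALEN);
 *	...
 *	dev_unicast_delete(lower_dev, addr, ETH_ALEN);
 */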
03525
03526 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
03527 struct dev_addr_list **from, int *from_count)
03528 {
03529 struct dev_addr_list *da, *next;
03530 int err = 0;
03531
03532 da = *from;
03533 while (da != NULL) {
03534 next = da->next;
03535 if (!da->da_synced) {
03536 err = __dev_addr_add(to, to_count,
03537 da->da_addr, da->da_addrlen, 0);
03538 if (err < 0)
03539 break;
03540 da->da_synced = 1;
03541 da->da_users++;
03542 } else if (da->da_users == 1) {
03543 __dev_addr_delete(to, to_count,
03544 da->da_addr, da->da_addrlen, 0);
03545 __dev_addr_delete(from, from_count,
03546 da->da_addr, da->da_addrlen, 0);
03547 }
03548 da = next;
03549 }
03550 return err;
03551 }
03552
03553 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
03554 struct dev_addr_list **from, int *from_count)
03555 {
03556 struct dev_addr_list *da, *next;
03557
03558 da = *from;
03559 while (da != NULL) {
03560 next = da->next;
03561 if (da->da_synced) {
03562 __dev_addr_delete(to, to_count,
03563 da->da_addr, da->da_addrlen, 0);
03564 da->da_synced = 0;
03565 __dev_addr_delete(from, from_count,
03566 da->da_addr, da->da_addrlen, 0);
03567 }
03568 da = next;
03569 }
03570 }
03571
03572 /**
03573  * dev_unicast_sync - synchronize device's unicast list to another device
03574  * @to: destination device
03575  * @from: source device
03576  *
03577  * Add newly added addresses to the destination device and release
03578  * addresses that have no users left. The source device must be
03579  * locked by netif_tx_lock_bh.
03580  *
03581  * This function is intended to be called from the dev->set_rx_mode
03582  * function of layered software devices.
03583  */
03584 int dev_unicast_sync(struct net_device *to, struct net_device *from)
03585 {
03586 int err = 0;
03587
03588 netif_addr_lock_bh(to);
03589 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
03590 &from->uc_list, &from->uc_count);
03591 if (!err)
03592 __dev_set_rx_mode(to);
03593 netif_addr_unlock_bh(to);
03594 return err;
03595 }
03596 EXPORT_SYMBOL(dev_unicast_sync);
03597
03598 /**
03599  * dev_unicast_unsync - remove synchronized addresses from destination
03600  * @to: destination device
03601  * @from: source device
03602  *
03603  * Remove all addresses that were added to the destination device by
03604  * dev_unicast_sync(). This function is intended to be called from
03605  * the dev->stop function of layered software devices.
03606  */
03607 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
03608 {
03609 netif_addr_lock_bh(from);
03610 netif_addr_lock(to);
03611
03612 __dev_addr_unsync(&to->uc_list, &to->uc_count,
03613 &from->uc_list, &from->uc_count);
03614 __dev_set_rx_mode(to);
03615
03616 netif_addr_unlock(to);
03617 netif_addr_unlock_bh(from);
03618 }
03619 EXPORT_SYMBOL(dev_unicast_unsync);
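/*
 * Typical pattern for layered devices (sketch; the my_* names are
 * hypothetical stand-ins for a stacked driver's own callbacks):
 *
 *	static void my_upper_set_rx_mode(struct net_device *upper)
 *	{
 *		struct net_device *lower = my_get_lower(upper);
 *
 *		dev_unicast_sync(lower, upper);	// from ndo_set_rx_mode
 *	}
 *
 *	// and from ndo_stop:  dev_unicast_unsync(lower, upper);
 */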
03620
03621 static void __dev_addr_discard(struct dev_addr_list **list)
03622 {
03623 struct dev_addr_list *tmp;
03624
03625 while (*list != NULL) {
03626 tmp = *list;
03627 *list = tmp->next;
03628 if (tmp->da_users > tmp->da_gusers)
03629 printk("__dev_addr_discard: address leakage! "
03630 "da_users=%d\n", tmp->da_users);
03631 kfree(tmp);
03632 }
03633 }
03634
03635 static void dev_addr_discard(struct net_device *dev)
03636 {
03637 netif_addr_lock_bh(dev);
03638
03639 __dev_addr_discard(&dev->uc_list);
03640 dev->uc_count = 0;
03641
03642 __dev_addr_discard(&dev->mc_list);
03643 dev->mc_count = 0;
03644
03645 netif_addr_unlock_bh(dev);
03646 }
03647
03648 /**
03649  * dev_get_flags - get flags reported to userspace
03650  * @dev: device
03651  *
03652  * Get the combination of flag bits exported through APIs to userspace.
03653  */
03654 unsigned dev_get_flags(const struct net_device *dev)
03655 {
03656 unsigned flags;
03657
03658 flags = (dev->flags & ~(IFF_PROMISC |
03659 IFF_ALLMULTI |
03660 IFF_RUNNING |
03661 IFF_LOWER_UP |
03662 IFF_DORMANT)) |
03663 (dev->gflags & (IFF_PROMISC |
03664 IFF_ALLMULTI));
03665
03666 if (netif_running(dev)) {
03667 if (netif_oper_up(dev))
03668 flags |= IFF_RUNNING;
03669 if (netif_carrier_ok(dev))
03670 flags |= IFF_LOWER_UP;
03671 if (netif_dormant(dev))
03672 flags |= IFF_DORMANT;
03673 }
03674
03675 return flags;
03676 }
03677
03678 /**
03679  * dev_change_flags - change device settings
03680  * @dev: device
03681  * @flags: device state flags
03682  *
03683  * Change settings on device based state flags. The flags are
03684  * in the userspace exported format.
03685  */
03686 int dev_change_flags(struct net_device *dev, unsigned flags)
03687 {
03688 int ret, changes;
03689 int old_flags = dev->flags;
03690
03691 ASSERT_RTNL();
03692
03693 /*
03694  * Set the flags on our device.
03695  */
03696
03697 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
03698 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
03699 IFF_AUTOMEDIA)) |
03700 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
03701 IFF_ALLMULTI));
03702
03703 /*
03704  * Load in the correct multicast list now the flags have changed.
03705  */
03706
03707 if ((old_flags ^ flags) & IFF_MULTICAST)
03708 dev_change_rx_flags(dev, IFF_MULTICAST);
03709
03710 dev_set_rx_mode(dev);
03711
03712 /*
03713  * Have we downed the interface? We handle IFF_UP ourselves
03714  * according to user attempts to set it, rather than blindly
03715  * setting it.
03716  */
03717
03718 ret = 0;
03719 if ((old_flags ^ flags) & IFF_UP) {
03720 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
03721
03722 if (!ret)
03723 dev_set_rx_mode(dev);
03724 }
03725
03726 if (dev->flags & IFF_UP &&
03727 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
03728 IFF_VOLATILE)))
03729 call_netdevice_notifiers(NETDEV_CHANGE, dev);
03730
03731 if ((flags ^ dev->gflags) & IFF_PROMISC) {
03732 int inc = (flags & IFF_PROMISC) ? +1 : -1;
03733 dev->gflags ^= IFF_PROMISC;
03734 dev_set_promiscuity(dev, inc);
03735 }
03736
03737 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
03738  * is important. Some (broken) drivers set IFF_PROMISC, when
03739  * IFF_ALLMULTI is requested, not asking us and not reporting.
03740  */
03741 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
03742 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
03743 dev->gflags ^= IFF_ALLMULTI;
03744 dev_set_allmulti(dev, inc);
03745 }
03746
03747 /* Exclude state transition flags, already notified */
03748 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
03749 if (changes)
03750 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
03751
03752 return ret;
03753 }
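/*
 * Usage sketch (illustrative): bringing an interface administratively up
 * or down from kernel code amounts to, with RTNL held:
 *
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);	// "up"
 *	...
 *	err = dev_change_flags(dev, dev->flags & ~IFF_UP);	// "down"
 */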
03754
03755 /**
03756  * dev_set_mtu - change maximum transfer unit
03757  * @dev: device
03758  * @new_mtu: new transfer unit
03759  *
03760  * Change the maximum transfer size of the network device.
03761  */
03762 int dev_set_mtu(struct net_device *dev, int new_mtu)
03763 {
03764 const struct net_device_ops *ops = dev->netdev_ops;
03765 int err;
03766
03767 if (new_mtu == dev->mtu)
03768 return 0;
03769
03770 /* MTU must be positive. */
03771 if (new_mtu < 0)
03772 return -EINVAL;
03773
03774 if (!netif_device_present(dev))
03775 return -ENODEV;
03776
03777 err = 0;
03778 if (ops->ndo_change_mtu)
03779 err = ops->ndo_change_mtu(dev, new_mtu);
03780 else
03781 dev->mtu = new_mtu;
03782
03783 if (!err && dev->flags & IFF_UP)
03784 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
03785 return err;
03786 }
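/*
 * Usage sketch (illustrative; my_encap_overhead is a hypothetical
 * constant): this is what SIOCSIFMTU boils down to, and an encapsulating
 * driver reserving header room might similarly do:
 *
 *	err = dev_set_mtu(dev, 1500 - my_encap_overhead);
 */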
03787
03788 /**
03789  * dev_set_mac_address - change Media Access Control Address
03790  * @dev: device
03791  * @sa: new address
03792  *
03793  * Change the hardware (MAC) address of the device.
03794  */
03795 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
03796 {
03797 const struct net_device_ops *ops = dev->netdev_ops;
03798 int err;
03799
03800 if (!ops->ndo_set_mac_address)
03801 return -EOPNOTSUPP;
03802 if (sa->sa_family != dev->type)
03803 return -EINVAL;
03804 if (!netif_device_present(dev))
03805 return -ENODEV;
03806 err = ops->ndo_set_mac_address(dev, sa);
03807 if (!err)
03808 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
03809 return err;
03810 }
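/*
 * Usage sketch (illustrative; new_mac is assumed to hold dev->addr_len
 * bytes): callers wrap the address in a struct sockaddr whose family
 * must match dev->type:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;		// e.g. ARPHRD_ETHER
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);	// with RTNL held
 */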
03811
03812 /*
03813  * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
03814  */
03815 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
03816 {
03817 int err;
03818 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
03819
03820 if (!dev)
03821 return -ENODEV;
03822
03823 switch (cmd) {
03824 case SIOCGIFFLAGS:
03825 ifr->ifr_flags = dev_get_flags(dev);
03826 return 0;
03827
03828 case SIOCGIFMETRIC:
03829 /* The interface metric is not implemented; report 0 */
03830 ifr->ifr_metric = 0;
03831 return 0;
03832
03833 case SIOCGIFMTU:
03834 ifr->ifr_mtu = dev->mtu;
03835 return 0;
03836
03837 case SIOCGIFHWADDR:
03838 if (!dev->addr_len)
03839 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
03840 else
03841 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
03842 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
03843 ifr->ifr_hwaddr.sa_family = dev->type;
03844 return 0;
03845
03846 case SIOCGIFSLAVE:
03847 err = -EINVAL;
03848 break;
03849
03850 case SIOCGIFMAP:
03851 ifr->ifr_map.mem_start = dev->mem_start;
03852 ifr->ifr_map.mem_end = dev->mem_end;
03853 ifr->ifr_map.base_addr = dev->base_addr;
03854 ifr->ifr_map.irq = dev->irq;
03855 ifr->ifr_map.dma = dev->dma;
03856 ifr->ifr_map.port = dev->if_port;
03857 return 0;
03858
03859 case SIOCGIFINDEX:
03860 ifr->ifr_ifindex = dev->ifindex;
03861 return 0;
03862
03863 case SIOCGIFTXQLEN:
03864 ifr->ifr_qlen = dev->tx_queue_len;
03865 return 0;
03866
03867 default:
03868 /* dev_ioctl() should ensure this case
03869  * is never reached.
03870  */
03871 WARN_ON(1);
03872 err = -EINVAL;
03873 break;
03874
03875 }
03876 return err;
03877 }
03878
03879 /*
03880  * Perform the SIOCxIFxxx calls, inside rtnl_lock()
03881  */
03882 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
03883 {
03884 int err;
03885 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
03886 const struct net_device_ops *ops;
03887
03888 if (!dev)
03889 return -ENODEV;
03890
03891 ops = dev->netdev_ops;
03892
03893 switch (cmd) {
03894 case SIOCSIFFLAGS:
03895 return dev_change_flags(dev, ifr->ifr_flags);
03896
03897 case SIOCSIFMETRIC:
03898 /* Setting the interface metric is not supported */
03899 return -EOPNOTSUPP;
03900
03901 case SIOCSIFMTU:
03902 return dev_set_mtu(dev, ifr->ifr_mtu);
03903
03904 case SIOCSIFHWADDR:
03905 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
03906
03907 case SIOCSIFHWBROADCAST:
03908 if (ifr->ifr_hwaddr.sa_family != dev->type)
03909 return -EINVAL;
03910 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
03911 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
03912 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
03913 return 0;
03914
03915 case SIOCSIFMAP:
03916 if (ops->ndo_set_config) {
03917 if (!netif_device_present(dev))
03918 return -ENODEV;
03919 return ops->ndo_set_config(dev, &ifr->ifr_map);
03920 }
03921 return -EOPNOTSUPP;
03922
03923 case SIOCADDMULTI:
03924 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
03925 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
03926 return -EINVAL;
03927 if (!netif_device_present(dev))
03928 return -ENODEV;
03929 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
03930 dev->addr_len, 1);
03931
03932 case SIOCDELMULTI:
03933 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
03934 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
03935 return -EINVAL;
03936 if (!netif_device_present(dev))
03937 return -ENODEV;
03938 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
03939 dev->addr_len, 1);
03940
03941 case SIOCSIFTXQLEN:
03942 if (ifr->ifr_qlen < 0)
03943 return -EINVAL;
03944 dev->tx_queue_len = ifr->ifr_qlen;
03945 return 0;
03946
03947 case SIOCSIFNAME:
03948 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
03949 return dev_change_name(dev, ifr->ifr_newname);
03950
03951
03952 /*
03953  * Unknown or private ioctl
03954  */
03955 default:
03956 if ((cmd >= SIOCDEVPRIVATE &&
03957 cmd <= SIOCDEVPRIVATE + 15) ||
03958 cmd == SIOCBONDENSLAVE ||
03959 cmd == SIOCBONDRELEASE ||
03960 cmd == SIOCBONDSETHWADDR ||
03961 cmd == SIOCBONDSLAVEINFOQUERY ||
03962 cmd == SIOCBONDINFOQUERY ||
03963 cmd == SIOCBONDCHANGEACTIVE ||
03964 cmd == SIOCGMIIPHY ||
03965 cmd == SIOCGMIIREG ||
03966 cmd == SIOCSMIIREG ||
03967 cmd == SIOCBRADDIF ||
03968 cmd == SIOCBRDELIF ||
03969 cmd == SIOCWANDEV) {
03970 err = -EOPNOTSUPP;
03971 if (ops->ndo_do_ioctl) {
03972 if (netif_device_present(dev))
03973 err = ops->ndo_do_ioctl(dev, ifr, cmd);
03974 else
03975 err = -ENODEV;
03976 }
03977 } else
03978 err = -EINVAL;
03979
03980 }
03981 return err;
03982 }
03983
03984 /**
03985  * dev_ioctl - network device ioctl
03986  * @net: the applicable net namespace
03987  * @cmd: command to issue
03988  * @arg: pointer to a struct ifreq in user space
03989  *
03990  * Issue ioctl functions to devices. This is normally called by the
03991  * user space syscall interfaces but can sometimes be useful for
03992  * other purposes. The return value is the return from the syscall
03993  * if positive or a negative errno code on error.
03994  */
03995
03996
03997
03998
03999
04000
04001 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
04002 {
04003 struct ifreq ifr;
04004 int ret;
04005 char *colon;
04006
04007 /* One special case: SIOCGIFCONF takes an ifconf argument
04008  * and requires shared lock, because it sleeps writing
04009  * to user space.
04010  */
04011
04012 if (cmd == SIOCGIFCONF) {
04013 rtnl_lock();
04014 ret = dev_ifconf(net, (char __user *) arg);
04015 rtnl_unlock();
04016 return ret;
04017 }
04018 if (cmd == SIOCGIFNAME)
04019 return dev_ifname(net, (struct ifreq __user *)arg);
04020
04021 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
04022 return -EFAULT;
04023
04024 ifr.ifr_name[IFNAMSIZ-1] = 0;
04025
04026 colon = strchr(ifr.ifr_name, ':');
04027 if (colon)
04028 *colon = 0;
04029
04030 /*
04031  * See which interface the caller is talking about.
04032  */
04033
04034 switch (cmd) {
04035 /*
04036  * These ioctl calls:
04037  * - can be done by all.
04038  * - atomic and do not require locking.
04039  * - return a value
04040  */
04041 case SIOCGIFFLAGS:
04042 case SIOCGIFMETRIC:
04043 case SIOCGIFMTU:
04044 case SIOCGIFHWADDR:
04045 case SIOCGIFSLAVE:
04046 case SIOCGIFMAP:
04047 case SIOCGIFINDEX:
04048 case SIOCGIFTXQLEN:
04049 dev_load(net, ifr.ifr_name);
04050 read_lock(&dev_base_lock);
04051 ret = dev_ifsioc_locked(net, &ifr, cmd);
04052 read_unlock(&dev_base_lock);
04053 if (!ret) {
04054 if (colon)
04055 *colon = ':';
04056 if (copy_to_user(arg, &ifr,
04057 sizeof(struct ifreq)))
04058 ret = -EFAULT;
04059 }
04060 return ret;
04061
04062 case SIOCETHTOOL:
04063 dev_load(net, ifr.ifr_name);
04064 rtnl_lock();
04065 ret = dev_ethtool(net, &ifr);
04066 rtnl_unlock();
04067 if (!ret) {
04068 if (colon)
04069 *colon = ':';
04070 if (copy_to_user(arg, &ifr,
04071 sizeof(struct ifreq)))
04072 ret = -EFAULT;
04073 }
04074 return ret;
04075
04076 /*
04077  * These ioctl calls:
04078  * - require superuser power.
04079  * - require strict serialization.
04080  * - return a value
04081  */
04082 case SIOCGMIIPHY:
04083 case SIOCGMIIREG:
04084 case SIOCSIFNAME:
04085 if (!capable(CAP_NET_ADMIN))
04086 return -EPERM;
04087 dev_load(net, ifr.ifr_name);
04088 rtnl_lock();
04089 ret = dev_ifsioc(net, &ifr, cmd);
04090 rtnl_unlock();
04091 if (!ret) {
04092 if (colon)
04093 *colon = ':';
04094 if (copy_to_user(arg, &ifr,
04095 sizeof(struct ifreq)))
04096 ret = -EFAULT;
04097 }
04098 return ret;
04099
04100 /*
04101  * These ioctl calls:
04102  * - require superuser power.
04103  * - require strict serialization.
04104  * - do not return a value
04105  */
04106 case SIOCSIFFLAGS:
04107 case SIOCSIFMETRIC:
04108 case SIOCSIFMTU:
04109 case SIOCSIFMAP:
04110 case SIOCSIFHWADDR:
04111 case SIOCSIFSLAVE:
04112 case SIOCADDMULTI:
04113 case SIOCDELMULTI:
04114 case SIOCSIFHWBROADCAST:
04115 case SIOCSIFTXQLEN:
04116 case SIOCSMIIREG:
04117 case SIOCBONDENSLAVE:
04118 case SIOCBONDRELEASE:
04119 case SIOCBONDSETHWADDR:
04120 case SIOCBONDCHANGEACTIVE:
04121 case SIOCBRADDIF:
04122 case SIOCBRDELIF:
04123 if (!capable(CAP_NET_ADMIN))
04124 return -EPERM;
04125 /* fall through */
04126 case SIOCBONDSLAVEINFOQUERY:
04127 case SIOCBONDINFOQUERY:
04128 dev_load(net, ifr.ifr_name);
04129 rtnl_lock();
04130 ret = dev_ifsioc(net, &ifr, cmd);
04131 rtnl_unlock();
04132 return ret;
04133
04134 case SIOCGIFMEM:
04135
04136
04137 case SIOCSIFMEM:
04138
04139
04140 case SIOCSIFLINK:
04141 return -EINVAL;
04142
04143 /*
04144  * Unknown or private ioctl.
04145  */
04146 default:
04147 if (cmd == SIOCWANDEV ||
04148 (cmd >= SIOCDEVPRIVATE &&
04149 cmd <= SIOCDEVPRIVATE + 15)) {
04150 dev_load(net, ifr.ifr_name);
04151 rtnl_lock();
04152 ret = dev_ifsioc(net, &ifr, cmd);
04153 rtnl_unlock();
04154 if (!ret && copy_to_user(arg, &ifr,
04155 sizeof(struct ifreq)))
04156 ret = -EFAULT;
04157 return ret;
04158 }
04159 /* Take care of Wireless Extensions */
04160 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
04161 return wext_handle_ioctl(net, &ifr, cmd, arg);
04162 return -EINVAL;
04163 }
04164 }
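/*
 * For reference, the user-space side of the simple "get" path above is
 * just an ioctl on any socket (sketch, error handling omitted):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIFMTU, &ifr);	// lands in dev_ifsioc_locked()
 *	printf("mtu = %d\n", ifr.ifr_mtu);
 */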
04165
04166 /**
04167  * dev_new_index - allocate an ifindex
04168  * @net: the applicable net namespace
04169  *
04170  * Returns a suitable unique value for a new device interface
04171  * number.  The caller must hold the rtnl semaphore or the
04172  * dev_base_lock to be sure it remains unique.
04173  */
04174
04175 static int dev_new_index(struct net *net)
04176 {
04177 static int ifindex;
04178 for (;;) {
04179 if (++ifindex <= 0)
04180 ifindex = 1;
04181 if (!__dev_get_by_index(net, ifindex))
04182 return ifindex;
04183 }
04184 }
04185
04186 /* Delayed registration/unregistration */
04187 static LIST_HEAD(net_todo_list);
04188
04189 static void net_set_todo(struct net_device *dev)
04190 {
04191 list_add_tail(&dev->todo_list, &net_todo_list);
04192 }
04193
04194 static void rollback_registered(struct net_device *dev)
04195 {
04196 BUG_ON(dev_boot_phase);
04197 ASSERT_RTNL();
04198
04199 /* Some devices call without registering for initialization unwind. */
04200 if (dev->reg_state == NETREG_UNINITIALIZED) {
04201 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
04202 "was registered\n", dev->name, dev);
04203
04204 WARN_ON(1);
04205 return;
04206 }
04207
04208 BUG_ON(dev->reg_state != NETREG_REGISTERED);
04209
04210 /* If device is running, close it first. */
04211 dev_close(dev);
04212
04213 /* And unlink it from device chain. */
04214 unlist_netdevice(dev);
04215
04216 dev->reg_state = NETREG_UNREGISTERING;
04217
04218 synchronize_net();
04219
04220 /* Shutdown queueing discipline. */
04221 dev_shutdown(dev);
04222
04223 /* Notify protocols, that we are about to destroy
04224  * this device. They should clean all the things.
04225  */
04226
04227 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
04228
04229 /*
04230  * Flush the unicast and multicast chains
04231  */
04232 dev_addr_discard(dev);
04233
04234 if (dev->netdev_ops->ndo_uninit)
04235 dev->netdev_ops->ndo_uninit(dev);
04236
04237 /* Notifier chain MUST detach us from master device. */
04238 WARN_ON(dev->master);
04239
04240 /* Remove entries from kobject tree */
04241 netdev_unregister_kobject(dev);
04242
04243 synchronize_net();
04244
04245 dev_put(dev);
04246 }
04247
04248 static void __netdev_init_queue_locks_one(struct net_device *dev,
04249 struct netdev_queue *dev_queue,
04250 void *_unused)
04251 {
04252 spin_lock_init(&dev_queue->_xmit_lock);
04253 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
04254 dev_queue->xmit_lock_owner = -1;
04255 }
04256
04257 static void netdev_init_queue_locks(struct net_device *dev)
04258 {
04259 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
04260 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
04261 }
04262
04263 unsigned long netdev_fix_features(unsigned long features, const char *name)
04264 {
04265 /* Fix illegal SG+CSUM combinations. */
04266 if ((features & NETIF_F_SG) &&
04267 !(features & NETIF_F_ALL_CSUM)) {
04268 if (name)
04269 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
04270 "checksum feature.\n", name);
04271 features &= ~NETIF_F_SG;
04272 }
04273
04274 /* TSO requires that SG is present as well. */
04275 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
04276 if (name)
04277 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
04278 "SG feature.\n", name);
04279 features &= ~NETIF_F_TSO;
04280 }
04281
04282 if (features & NETIF_F_UFO) {
04283 if (!(features & NETIF_F_GEN_CSUM)) {
04284 if (name)
04285 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
04286 "since no NETIF_F_HW_CSUM feature.\n",
04287 name);
04288 features &= ~NETIF_F_UFO;
04289 }
04290
04291 if (!(features & NETIF_F_SG)) {
04292 if (name)
04293 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
04294 "since no NETIF_F_SG feature.\n", name);
04295 features &= ~NETIF_F_UFO;
04296 }
04297 }
04298
04299 return features;
04300 }
04301 EXPORT_SYMBOL(netdev_fix_features);
04302
04303 /*
04304  * Re-sync the old-style net_device callbacks from netdev_ops
04305  * (compat support while drivers are being converted; see below).
04306  */
04307 void netdev_resync_ops(struct net_device *dev)
04308 {
04309 #ifdef CONFIG_COMPAT_NET_DEV_OPS
04310 const struct net_device_ops *ops = dev->netdev_ops;
04311
04312 dev->init = ops->ndo_init;
04313 dev->uninit = ops->ndo_uninit;
04314 dev->open = ops->ndo_open;
04315 dev->change_rx_flags = ops->ndo_change_rx_flags;
04316 dev->set_rx_mode = ops->ndo_set_rx_mode;
04317 dev->set_multicast_list = ops->ndo_set_multicast_list;
04318 dev->set_mac_address = ops->ndo_set_mac_address;
04319 dev->validate_addr = ops->ndo_validate_addr;
04320 dev->do_ioctl = ops->ndo_do_ioctl;
04321 dev->set_config = ops->ndo_set_config;
04322 dev->change_mtu = ops->ndo_change_mtu;
04323 dev->neigh_setup = ops->ndo_neigh_setup;
04324 dev->tx_timeout = ops->ndo_tx_timeout;
04325 dev->get_stats = ops->ndo_get_stats;
04326 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
04327 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
04328 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
04329 #ifdef CONFIG_NET_POLL_CONTROLLER
04330 dev->poll_controller = ops->ndo_poll_controller;
04331 #endif
04332 #endif
04333 }
04334 EXPORT_SYMBOL(netdev_resync_ops);
04335
04336 /**
04337  * register_netdevice - register a network device
04338  * @dev: device to register
04339  *
04340  * Take a completed network device structure and add it to the
04341  * kernel interfaces. A %NETDEV_REGISTER message is sent to the
04342  * netdev notifier chain. 0 is returned on success. A negative
04343  * errno code is returned on a failure to set up the device, or if
04344  * the name is a duplicate.
04345  *
04346  * Callers must hold the rtnl semaphore. You may want
04347  * register_netdev() instead of this.
04348  *
04349  * BUGS:
04350  * The locking appears insufficient to guarantee two parallel
04351  * registers will not get the same name.
04352  */
04353 int register_netdevice(struct net_device *dev)
04354 {
04355 struct hlist_head *head;
04356 struct hlist_node *p;
04357 int ret;
04358 struct net *net = dev_net(dev);
04359
04360 BUG_ON(dev_boot_phase);
04361 ASSERT_RTNL();
04362
04363 might_sleep();
04364
04365 /* When net_device's are persistent, this will be fatal. */
04366 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
04367 BUG_ON(!net);
04368
04369 spin_lock_init(&dev->addr_list_lock);
04370 netdev_set_addr_lockdep_class(dev);
04371 netdev_init_queue_locks(dev);
04372
04373 dev->iflink = -1;
04374
04375 #ifdef CONFIG_COMPAT_NET_DEV_OPS
04376 /* Netdevice_ops API compatibility support.
04377  * This is temporary until all network devices are converted.
04378  */
04379 if (dev->netdev_ops) {
04380 netdev_resync_ops(dev);
04381 } else {
04382 char drivername[64];
04383 pr_info("%s (%s): not using net_device_ops yet\n",
04384 dev->name, netdev_drivername(dev, drivername, 64));
04385
04386 /* This works only because net_device_ops and the
04387  * compatibility structure are the same. */
04388 dev->netdev_ops = (void *) &(dev->init);
04389 }
04390 #endif
04391
04392 /* Init, if this function is available */
04393 if (dev->netdev_ops->ndo_init) {
04394 ret = dev->netdev_ops->ndo_init(dev);
04395 if (ret) {
04396 if (ret > 0)
04397 ret = -EIO;
04398 goto out;
04399 }
04400 }
04401
04402 if (!dev_valid_name(dev->name)) {
04403 ret = -EINVAL;
04404 goto err_uninit;
04405 }
04406
04407 dev->ifindex = dev_new_index(net);
04408 if (dev->iflink == -1)
04409 dev->iflink = dev->ifindex;
04410
04411 /* Check for existence of name */
04412 head = dev_name_hash(net, dev->name);
04413 hlist_for_each(p, head) {
04414 struct net_device *d
04415 = hlist_entry(p, struct net_device, name_hlist);
04416 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
04417 ret = -EEXIST;
04418 goto err_uninit;
04419 }
04420 }
04421
04422 /* Fix illegal checksum combinations */
04423 if ((dev->features & NETIF_F_HW_CSUM) &&
04424 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
04425 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
04426 dev->name);
04427 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
04428 }
04429
04430 if ((dev->features & NETIF_F_NO_CSUM) &&
04431 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
04432 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
04433 dev->name);
04434 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
04435 }
04436
04437 dev->features = netdev_fix_features(dev->features, dev->name);
04438
04439 /* Enable software GSO if SG is supported. */
04440 if (dev->features & NETIF_F_SG)
04441 dev->features |= NETIF_F_GSO;
04442
04443 netdev_initialize_kobject(dev);
04444 ret = netdev_register_kobject(dev);
04445 if (ret)
04446 goto err_uninit;
04447 dev->reg_state = NETREG_REGISTERED;
04448
04449 /*
04450  * Default initial state at registry is that the
04451  * device is present.
04452  */
04453
04454 set_bit(__LINK_STATE_PRESENT, &dev->state);
04455
04456 dev_init_scheduler(dev);
04457 dev_hold(dev);
04458 list_netdevice(dev);
04459
04460 /* Notify protocols, that a new device appeared. */
04461 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
04462 ret = notifier_to_errno(ret);
04463 if (ret) {
04464 rollback_registered(dev);
04465 dev->reg_state = NETREG_UNREGISTERED;
04466 }
04467
04468 out:
04469 return ret;
04470
04471 err_uninit:
04472 if (dev->netdev_ops->ndo_uninit)
04473 dev->netdev_ops->ndo_uninit(dev);
04474 goto out;
04475 }
04476
04477 /**
04478  * init_dummy_netdev - init a dummy network device for NAPI
04479  * @dev: device to init
04480  *
04481  * This takes a network device structure and initializes the minimum
04482  * amount of fields so it can be used to schedule NAPI polls without
04483  * registering a full blown interface. This is to be used by drivers
04484  * that need to tie several hardware interfaces to a single NAPI
04485  * poll scheduler due to HW limitations.
04486  */
04487 int init_dummy_netdev(struct net_device *dev)
04488 {
04489 /* Clear everything. Note we don't initialize spinlocks
04490  * as they aren't supposed to be taken by any of the
04491  * NAPI code and this dummy netdev is supposed to be
04492  * only ever used for NAPI polls.
04493  */
04494 memset(dev, 0, sizeof(struct net_device));
04495
04496 /* Make sure we BUG if trying to hit standard
04497  * register/unregister code path.
04498  */
04499 dev->reg_state = NETREG_DUMMY;
04500
04501 /* initialize the ref count */
04502 atomic_set(&dev->refcnt, 1);
04503
04504 /* NAPI wants this */
04505 INIT_LIST_HEAD(&dev->napi_list);
04506
04507 /* a dummy interface is started by default */
04508 set_bit(__LINK_STATE_PRESENT, &dev->state);
04509 set_bit(__LINK_STATE_START, &dev->state);
04510
04511 return 0;
04512 }
04513 EXPORT_SYMBOL_GPL(init_dummy_netdev);
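/*
 * Usage sketch (illustrative; the priv layout and my_poll are
 * hypothetical): a driver whose hardware funnels several channels
 * through one poll routine can back its NAPI context with a dummy
 * device instead of a registered one:
 *
 *	init_dummy_netdev(&priv->dummy_dev);
 *	netif_napi_add(&priv->dummy_dev, &priv->napi, my_poll, 64);
 *
 * The dummy device is never registered, so it must never be passed to
 * unregister_netdev()/free_netdev(); netif_napi_del() plus freeing the
 * containing structure is enough.
 */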
04514
04515 /**
04516  * register_netdev - register a network device
04517  * @dev: device to register
04518  *
04519  * Take a completed network device structure and add it to the
04520  * kernel interfaces. A %NETDEV_REGISTER message is sent to the
04521  * netdev notifier chain. 0 is returned on success. A negative
04522  * errno code is returned on a failure to set up the device, or if
04523  * the name is a duplicate.
04524  *
04525  * This is a wrapper around register_netdevice() that takes the
04526  * rtnl semaphore and expands the device name if you passed a
04527  * format string to alloc_netdev.
04528  */
04529 int register_netdev(struct net_device *dev)
04530 {
04531 int err;
04532
04533 rtnl_lock();
04534
04535 /*
04536  * If the name is a format string the caller wants us to do a
04537  * name allocation.
04538  */
04539 if (strchr(dev->name, '%')) {
04540 err = dev_alloc_name(dev, dev->name);
04541 if (err < 0)
04542 goto out;
04543 }
04544
04545 err = register_netdevice(dev);
04546 out:
04547 rtnl_unlock();
04548 return err;
04549 }
04550 EXPORT_SYMBOL(register_netdev);
04551
04552 /* netdev_wait_allrefs - wait until all references are gone.
04553  *
04554  * This is called when unregistering network devices.
04555  *
04556  * Any protocol or device that holds a reference should register
04557  * for netdevice notification, and cleanup via the notifier callbacks.
04558  *
04559  * This is called in the notifier registration path, typically from
04560  * unregister_netdevice(), and runs before the device is freed.
04561  */
04562
04563 static void netdev_wait_allrefs(struct net_device *dev)
04564 {
04565 unsigned long rebroadcast_time, warning_time;
04566
04567 rebroadcast_time = warning_time = jiffies;
04568 while (atomic_read(&dev->refcnt) != 0) {
04569 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
04570 rtnl_lock();
04571
04572 /* Rebroadcast unregister notification */
04573 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
04574
04575 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
04576 &dev->state)) {
04577 /* We must not have linkwatch events
04578  * pending on unregister. If this
04579  * happens, we simply run the queue
04580  * unscheduled, resulting in a noop
04581  * for this device.
04582  */
04583 linkwatch_run_queue();
04584 }
04585
04586 __rtnl_unlock();
04587
04588 rebroadcast_time = jiffies;
04589 }
04590
04591 msleep(250);
04592
04593 if (time_after(jiffies, warning_time + 10 * HZ)) {
04594 printk(KERN_EMERG "unregister_netdevice: "
04595 "waiting for %s to become free. Usage "
04596 "count = %d\n",
04597 dev->name, atomic_read(&dev->refcnt));
04598 warning_time = jiffies;
04599 }
04600 }
04601 }
04602
04603 /* The sequence is:
04604  *
04605  *	rtnl_lock();
04606  *	...
04607  *	register_netdevice(x1);
04608  *	register_netdevice(x2);
04609  *	...
04610  *	unregister_netdevice(y1);
04611  *	unregister_netdevice(y2);
04612  *	...
04613  *	rtnl_unlock();
04614  *	free_netdev(y1);
04615  *	free_netdev(y2);
04616  *
04617  * We are invoked by rtnl_unlock().
04618  * This allows us to deal with problems:
04619  * 1) We can delete sysfs objects which invoke hotplug
04620  *    without deadlocking with linkwatch via keventd.
04621  * 2) Since we run with the RTNL semaphore not held, we can sleep
04622  *    safely in order to wait for the netdev refcnt to drop to zero.
04623  */
04624
04625
04626
04627 void netdev_run_todo(void)
04628 {
04629 struct list_head list;
04630
04631 /* Snapshot list, allow later requests */
04632 list_replace_init(&net_todo_list, &list);
04633
04634 __rtnl_unlock();
04635
04636 while (!list_empty(&list)) {
04637 struct net_device *dev
04638 = list_entry(list.next, struct net_device, todo_list);
04639 list_del(&dev->todo_list);
04640
04641 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
04642 printk(KERN_ERR "network todo '%s' but state %d\n",
04643 dev->name, dev->reg_state);
04644 dump_stack();
04645 continue;
04646 }
04647
04648 dev->reg_state = NETREG_UNREGISTERED;
04649
04650 on_each_cpu(flush_backlog, dev, 1);
04651
04652 netdev_wait_allrefs(dev);
04653
04654 /* paranoia */
04655 BUG_ON(atomic_read(&dev->refcnt));
04656 WARN_ON(dev->ip_ptr);
04657 WARN_ON(dev->ip6_ptr);
04658 WARN_ON(dev->dn_ptr);
04659
04660 if (dev->destructor)
04661 dev->destructor(dev);
04662
04663 /* Free network device */
04664 kobject_put(&dev->dev.kobj);
04665 }
04666 }
04667
04668 /**
04669  * dev_get_stats - get network device statistics
04670  * @dev: device to get statistics from
04671  *
04672  * Get network statistics from device. The device driver may
04673  * provide its own method by setting dev->netdev_ops->ndo_get_stats;
04674  * otherwise the internal statistics structure is used.
04675  */
04676 const struct net_device_stats *dev_get_stats(struct net_device *dev)
04677 {
04678 const struct net_device_ops *ops = dev->netdev_ops;
04679
04680 if (ops->ndo_get_stats)
04681 return ops->ndo_get_stats(dev);
04682 else
04683 return &dev->stats;
04684 }
04685 EXPORT_SYMBOL(dev_get_stats);
04686
04687 static void netdev_init_one_queue(struct net_device *dev,
04688 struct netdev_queue *queue,
04689 void *_unused)
04690 {
04691 queue->dev = dev;
04692 }
04693
04694 static void netdev_init_queues(struct net_device *dev)
04695 {
04696 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
04697 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
04698 spin_lock_init(&dev->tx_global_lock);
04699 }
04700
04701 /**
04702  * alloc_netdev_mq - allocate network device
04703  * @sizeof_priv:	size of private data to allocate space for
04704  * @name:		device name format string
04705  * @setup:		callback to initialize device
04706  * @queue_count:	the number of subqueues to allocate
04707  *
04708  * Allocates a struct net_device with private data area for driver
04709  * use and performs basic initialization.  Also allocates subqueue
04710  * structs for each queue on the device at the end of the netdevice.
04711  */
04712 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
04713 void (*setup)(struct net_device *), unsigned int queue_count)
04714 {
04715 struct netdev_queue *tx;
04716 struct net_device *dev;
04717 size_t alloc_size;
04718 void *p;
04719
04720 BUG_ON(strlen(name) >= sizeof(dev->name));
04721
04722 alloc_size = sizeof(struct net_device);
04723 if (sizeof_priv) {
04724 /* ensure 32-byte alignment of private area */
04725 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
04726 alloc_size += sizeof_priv;
04727 }
04728 /* ensure 32-byte alignment of whole construct */
04729 alloc_size += NETDEV_ALIGN_CONST;
04730
04731 p = kzalloc(alloc_size, GFP_KERNEL);
04732 if (!p) {
04733 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
04734 return NULL;
04735 }
04736
04737 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
04738 if (!tx) {
04739 printk(KERN_ERR "alloc_netdev: Unable to allocate "
04740 "tx qdiscs.\n");
04741 kfree(p);
04742 return NULL;
04743 }
04744
04745 dev = (struct net_device *)
04746 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
04747 dev->padded = (char *)dev - (char *)p;
04748 dev_net_set(dev, &init_net);
04749
04750 dev->_tx = tx;
04751 dev->num_tx_queues = queue_count;
04752 dev->real_num_tx_queues = queue_count;
04753
04754 dev->gso_max_size = GSO_MAX_SIZE;
04755
04756 netdev_init_queues(dev);
04757
04758 INIT_LIST_HEAD(&dev->napi_list);
04759 setup(dev);
04760 strcpy(dev->name, name);
04761 return dev;
04762 }
04763 EXPORT_SYMBOL(alloc_netdev_mq);
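/*
 * Lifecycle sketch (illustrative; struct my_priv and my_setup are
 * hypothetical, my_setup playing the role of e.g. ether_setup). A '%d'
 * in the name is expanded later by register_netdev():
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d",
 *			      my_setup, 4);		// 4 TX subqueues
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */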
04764
04765 /**
04766  * free_netdev - free network device
04767  * @dev: device
04768  *
04769  * This function does the last stage of destroying an allocated
04770  * device interface. The reference to the device object is released.
04771  * If this is the last reference then it will be freed.
04772  */
04773 void free_netdev(struct net_device *dev)
04774 {
04775 struct napi_struct *p, *n;
04776
04777 release_net(dev_net(dev));
04778
04779 kfree(dev->_tx);
04780
04781 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
04782 netif_napi_del(p);
04783
04784 /* Compatibility with error handling in drivers */
04785 if (dev->reg_state == NETREG_UNINITIALIZED) {
04786 kfree((char *)dev - dev->padded);
04787 return;
04788 }
04789
04790 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
04791 dev->reg_state = NETREG_RELEASED;
04792
04793 /* will free via device release */
04794 put_device(&dev->dev);
04795 }
04796
04797 /**
04798  * synchronize_net - Synchronize with packet receive processing
04799  *
04800  * Wait for packets currently being received to be done.
04801  * Does not block later packets from starting.
04802  */
04803 void synchronize_net(void)
04804 {
04805 might_sleep();
04806 #ifndef DDE_LINUX
04807 synchronize_rcu();
04808 #endif
04809 }
04810
04811 /**
04812  * unregister_netdevice - remove device from the kernel
04813  * @dev: device
04814  *
04815  * This function shuts down a device interface and removes it
04816  * from the kernel tables.
04817  *
04818  * Callers must hold the rtnl semaphore.  You may want
04819  * unregister_netdev() instead of this.
04820  */
04821
04822 void unregister_netdevice(struct net_device *dev)
04823 {
04824 ASSERT_RTNL();
04825
04826 rollback_registered(dev);
04827 /* Finish processing unregister after unlock */
04828 net_set_todo(dev);
04829 }
04830
04831 /**
04832  * unregister_netdev - remove device from the kernel
04833  * @dev: device
04834  *
04835  * This function shuts down a device interface and removes it
04836  * from the kernel tables.
04837  *
04838  * This is just a wrapper for unregister_netdevice() that takes
04839  * the rtnl semaphore.  In general you want to use this and not
04840  * unregister_netdevice().
04841  */
04842 void unregister_netdev(struct net_device *dev)
04843 {
04844 rtnl_lock();
04845 unregister_netdevice(dev);
04846 rtnl_unlock();
04847 }
04848
04849 EXPORT_SYMBOL(unregister_netdev);
04850
04851 /**
04852  * dev_change_net_namespace - move device to different net namespace
04853  * @dev: device
04854  * @net: network namespace
04855  * @pat: if not NULL, name pattern to try if the current device name
04856  *       is already taken in the destination network namespace.
04857  *
04858  * This function shuts down a device interface and moves it
04859  * to a new network namespace. On success 0 is returned, on
04860  * a failure a negative errno code is returned.
04861  *
04862  * Callers must hold the rtnl semaphore.
04863  */
04864
04865 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
04866 {
04867 char buf[IFNAMSIZ];
04868 const char *destname;
04869 int err;
04870
04871 ASSERT_RTNL();
04872
04873 /* Don't allow namespace local devices to be moved. */
04874 err = -EINVAL;
04875 if (dev->features & NETIF_F_NETNS_LOCAL)
04876 goto out;
04877
04878 #ifdef CONFIG_SYSFS
04879 /* Don't allow real devices to be moved when sysfs
04880  * is enabled.
04881  */
04882 err = -EINVAL;
04883 if (dev->dev.parent)
04884 goto out;
04885 #endif
04886
04887 /* Ensure the device has been registered */
04888 err = -EINVAL;
04889 if (dev->reg_state != NETREG_REGISTERED)
04890 goto out;
04891
04892 /* Get out if there is nothing to do */
04893 err = 0;
04894 if (net_eq(dev_net(dev), net))
04895 goto out;
04896
04897 /* Pick the destination device name, and ensure
04898  * we can use it in the destination network namespace.
04899  */
04900 err = -EEXIST;
04901 destname = dev->name;
04902 if (__dev_get_by_name(net, destname)) {
04903 /* We get here if we can't use the current device name */
04904 if (!pat)
04905 goto out;
04906 if (!dev_valid_name(pat))
04907 goto out;
04908 if (strchr(pat, '%')) {
04909 if (__dev_alloc_name(net, pat, buf) < 0)
04910 goto out;
04911 destname = buf;
04912 } else
04913 destname = pat;
04914 if (__dev_get_by_name(net, destname))
04915 goto out;
04916 }
04917
04918 /*
04919  * And now a mini version of register_netdevice/unregister_netdevice.
04920  */
04921
04922 /* If device is running, close it first. */
04923 dev_close(dev);
04924
04925 /* And unlink it from device chain */
04926 err = -ENODEV;
04927 unlist_netdevice(dev);
04928
04929 synchronize_net();
04930
04931 /* Shutdown queueing discipline. */
04932 dev_shutdown(dev);
04933
04934 /* Notify protocols, that we are about to destroy
04935  * this device. They should clean all the things.
04936  */
04937 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
04938
04939 /*
04940  * Flush the unicast and multicast chains
04941  */
04942 dev_addr_discard(dev);
04943
04944 netdev_unregister_kobject(dev);
04945
04946 /* Actually switch the network namespace */
04947 dev_net_set(dev, net);
04948
04949 /* Assign the new device name */
04950 if (destname != dev->name)
04951 strcpy(dev->name, destname);
04952
04953 /* If there is an ifindex conflict assign a new one */
04954 if (__dev_get_by_index(net, dev->ifindex)) {
04955 int iflink = (dev->iflink == dev->ifindex);
04956 dev->ifindex = dev_new_index(net);
04957 if (iflink)
04958 dev->iflink = dev->ifindex;
04959 }
04960
04961 /* Fixup kobjects */
04962 err = netdev_register_kobject(dev);
04963 WARN_ON(err);
04964
04965 /* Add the device back in the hashes */
04966 list_netdevice(dev);
04967
04968 /* Notify protocols, that a new device appeared. */
04969 call_netdevice_notifiers(NETDEV_REGISTER, dev);
04970
04971 synchronize_net();
04972 err = 0;
04973 out:
04974 return err;
04975 }
04976
04977 static int dev_cpu_callback(struct notifier_block *nfb,
04978 unsigned long action,
04979 void *ocpu)
04980 {
04981 struct sk_buff **list_skb;
04982 struct Qdisc **list_net;
04983 struct sk_buff *skb;
04984 unsigned int cpu, oldcpu = (unsigned long)ocpu;
04985 struct softnet_data *sd, *oldsd;
04986
04987 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
04988 return NOTIFY_OK;
04989
04990 local_irq_disable();
04991 cpu = smp_processor_id();
04992 sd = &per_cpu(softnet_data, cpu);
04993 oldsd = &per_cpu(softnet_data, oldcpu);
04994
04995 /* Find end of our completion_queue. */
04996 list_skb = &sd->completion_queue;
04997 while (*list_skb)
04998 list_skb = &(*list_skb)->next;
04999
05000 *list_skb = oldsd->completion_queue;
05001 oldsd->completion_queue = NULL;
05002
05003 /* Find end of our output_queue. */
05004 list_net = &sd->output_queue;
05005 while (*list_net)
05006 list_net = &(*list_net)->next_sched;
05007
05008 *list_net = oldsd->output_queue;
05009 oldsd->output_queue = NULL;
05010
05011 raise_softirq_irqoff(NET_TX_SOFTIRQ);
05012 local_irq_enable();
05013
05014 /* Process offline CPU's input_pkt_queue */
05015 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
05016 netif_rx(skb);
05017
05018 return NOTIFY_OK;
05019 }
05020
05021 /**
05022  * netdev_increment_features - increment feature set by one
05023  * @all: current feature set
05024  * @one: new feature set
05025  * @mask: mask feature set
05026  *
05027  * Computes a new feature set after adding a device with feature set
05028  * @one to the master device with current feature set @all.  Will not
05029  * enable anything that is off in @mask. Returns the new feature set.
05030  */
05031
05032 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
05033 unsigned long mask)
05034 {
05035 /* If device needs checksumming, downgrade to it. */
05036 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
05037 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
05038 else if (mask & NETIF_F_ALL_CSUM) {
05039 /* If one device supports v4/v6 checksumming, set for all. */
05040 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
05041 !(all & NETIF_F_GEN_CSUM)) {
05042 all &= ~NETIF_F_ALL_CSUM;
05043 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
05044 }
05045
05046 /* If one device supports hw checksumming, set for all. */
05047 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
05048 all &= ~NETIF_F_ALL_CSUM;
05049 all |= NETIF_F_HW_CSUM;
05050 }
05051 }
05052
05053 one |= NETIF_F_ALL_CSUM;
05054
05055 one |= all & NETIF_F_ONE_FOR_ALL;
05056 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
05057 all |= one & mask & NETIF_F_ONE_FOR_ALL;
05058
05059 return all;
05060 }
05061 EXPORT_SYMBOL(netdev_increment_features);
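/*
 * Usage sketch (illustrative, modeled on how a bridge-like master would
 * recompute its feature set; br and p are hypothetical here): fold in
 * each slave, starting from the master's allowed mask, then sanitize:
 *
 *	unsigned long features = br->feature_mask;
 *	struct net_bridge_port *p;
 *
 *	list_for_each_entry(p, &br->port_list, list)
 *		features = netdev_increment_features(features,
 *						     p->dev->features,
 *						     br->feature_mask);
 *	br->dev->features = netdev_fix_features(features, NULL);
 */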
05062
05063 static struct hlist_head *netdev_create_hash(void)
05064 {
05065 int i;
05066 struct hlist_head *hash;
05067
05068 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
05069 if (hash != NULL)
05070 for (i = 0; i < NETDEV_HASHENTRIES; i++)
05071 INIT_HLIST_HEAD(&hash[i]);
05072
05073 return hash;
05074 }
05075
05076 /* Initialize per network namespace state */
05077 static int __net_init netdev_init(struct net *net)
05078 {
05079 INIT_LIST_HEAD(&net->dev_base_head);
05080
05081 net->dev_name_head = netdev_create_hash();
05082 if (net->dev_name_head == NULL)
05083 goto err_name;
05084
05085 net->dev_index_head = netdev_create_hash();
05086 if (net->dev_index_head == NULL)
05087 goto err_idx;
05088
05089 return 0;
05090
05091 err_idx:
05092 kfree(net->dev_name_head);
05093 err_name:
05094 return -ENOMEM;
05095 }
05096
05097 /**
05098  * netdev_drivername - network driver for the device
05099  * @dev: network device
05100  * @buffer: buffer for resulting name
05101  * @len: size of buffer
05102  *
05103  * Determine network driver for device.
05104  */
05105 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
05106 {
05107 const struct device_driver *driver;
05108 const struct device *parent;
05109
05110 if (len <= 0 || !buffer)
05111 return buffer;
05112 buffer[0] = 0;
05113
05114 parent = dev->dev.parent;
05115
05116 if (!parent)
05117 return buffer;
05118
05119 driver = parent->driver;
05120 if (driver && driver->name)
05121 strlcpy(buffer, driver->name, len);
05122 return buffer;
05123 }
05124
05125 static void __net_exit netdev_exit(struct net *net)
05126 {
05127 kfree(net->dev_name_head);
05128 kfree(net->dev_index_head);
05129 }
05130
05131 static struct pernet_operations __net_initdata netdev_net_ops = {
05132 .init = netdev_init,
05133 .exit = netdev_exit,
05134 };
05135
05136 static void __net_exit default_device_exit(struct net *net)
05137 {
05138 struct net_device *dev;
05139
05140 /* At exit all network devices must be removed from a network
05141  * namespace.  Do this in the reverse order of registration.
05142  */
05143 rtnl_lock();
05144 restart:
05145 for_each_netdev(net, dev) {
05146 int err;
05147 char fb_name[IFNAMSIZ];
05148
05149 /* Ignore unmoveable devices (i.e. loopback) */
05150 if (dev->features & NETIF_F_NETNS_LOCAL)
05151 continue;
05152
05153 /* Delete virtual devices */
05154 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
05155 dev->rtnl_link_ops->dellink(dev);
05156 goto restart;
05157 }
05158
05159 /* Push remaining network devices to init_net */
05160 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
05161 err = dev_change_net_namespace(dev, &init_net, fb_name);
05162 if (err) {
05163 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
05164 __func__, dev->name, err);
05165 BUG();
05166 }
05167 goto restart;
05168 }
05169 rtnl_unlock();
05170 }
05171
05172 static struct pernet_operations __net_initdata default_device_ops = {
05173 .exit = default_device_exit,
05174 };
05175
05176 /*
05177  * Initialize the DEV module. At boot time this walks the device
05178  * list and unhooks any devices that fail to initialise (normally
05179  * hardware not present) and leaves us with a valid list of present
05180  * and active devices.
05181  */
05182
05183
05184
05185
05186
05187 static int __init net_dev_init(void)
05188 {
05189 int i, rc = -ENOMEM;
05190
05191 BUG_ON(!dev_boot_phase);
05192
05193 if (dev_proc_init())
05194 goto out;
05195
05196 if (netdev_kobject_init())
05197 goto out;
05198
05199 INIT_LIST_HEAD(&ptype_all);
05200 for (i = 0; i < PTYPE_HASH_SIZE; i++)
05201 INIT_LIST_HEAD(&ptype_base[i]);
05202
05203 if (register_pernet_subsys(&netdev_net_ops))
05204 goto out;
05205
05206 /*
05207  * Initialise the packet receive queues.
05208  */
05209
05210 for_each_possible_cpu(i) {
05211 struct softnet_data *queue;
05212
05213 queue = &per_cpu(softnet_data, i);
05214 skb_queue_head_init(&queue->input_pkt_queue);
05215 queue->completion_queue = NULL;
05216 INIT_LIST_HEAD(&queue->poll_list);
05217
05218 queue->backlog.poll = process_backlog;
05219 queue->backlog.weight = weight_p;
05220 queue->backlog.gro_list = NULL;
05221 }
05222
05223 dev_boot_phase = 0;
05224
05225 /* The loopback device is special: if any other network device
05226  * is present in a network namespace, the loopback device must
05227  * be present too. Since we now dynamically allocate and free
05228  * the loopback device, ensure this invariant is maintained by
05229  * keeping the loopback device the first device on the list of
05230  * network devices: the first device to appear and the last
05231  * network device to disappear.
05232  */
05233
05234 #ifndef DDE_LINUX
05235 if (register_pernet_device(&loopback_net_ops))
05236 goto out;
05237 #endif
05238
05239 if (register_pernet_device(&default_device_ops))
05240 goto out;
05241
05242 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
05243 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
05244
05245 hotcpu_notifier(dev_cpu_callback, 0);
05246 #ifndef DDE_LINUX
05247 dst_init();
05248 #endif
05249 dev_mcast_init();
05250 rc = 0;
05251 out:
05252 return rc;
05253 }
05254
05255 subsys_initcall(net_dev_init);
05256
05257 EXPORT_SYMBOL(__dev_get_by_index);
05258 EXPORT_SYMBOL(__dev_get_by_name);
05259 EXPORT_SYMBOL(__dev_remove_pack);
05260 EXPORT_SYMBOL(dev_valid_name);
05261 EXPORT_SYMBOL(dev_add_pack);
05262 EXPORT_SYMBOL(dev_alloc_name);
05263 EXPORT_SYMBOL(dev_close);
05264 EXPORT_SYMBOL(dev_get_by_flags);
05265 EXPORT_SYMBOL(dev_get_by_index);
05266 EXPORT_SYMBOL(dev_get_by_name);
05267 EXPORT_SYMBOL(dev_open);
05268 EXPORT_SYMBOL(dev_queue_xmit);
05269 EXPORT_SYMBOL(dev_remove_pack);
05270 EXPORT_SYMBOL(dev_set_allmulti);
05271 EXPORT_SYMBOL(dev_set_promiscuity);
05272 EXPORT_SYMBOL(dev_change_flags);
05273 EXPORT_SYMBOL(dev_set_mtu);
05274 EXPORT_SYMBOL(dev_set_mac_address);
05275 EXPORT_SYMBOL(free_netdev);
05276 EXPORT_SYMBOL(netdev_boot_setup_check);
05277 EXPORT_SYMBOL(netdev_set_master);
05278 EXPORT_SYMBOL(netdev_state_change);
05279 EXPORT_SYMBOL(netif_receive_skb);
05280 EXPORT_SYMBOL(netif_rx);
05281 EXPORT_SYMBOL(register_gifconf);
05282 EXPORT_SYMBOL(register_netdevice);
05283 EXPORT_SYMBOL(register_netdevice_notifier);
05284 EXPORT_SYMBOL(skb_checksum_help);
05285 EXPORT_SYMBOL(synchronize_net);
05286 EXPORT_SYMBOL(unregister_netdevice);
05287 EXPORT_SYMBOL(unregister_netdevice_notifier);
05288 EXPORT_SYMBOL(net_enable_timestamp);
05289 EXPORT_SYMBOL(net_disable_timestamp);
05290 EXPORT_SYMBOL(dev_get_flags);
05291
05292 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
05293 EXPORT_SYMBOL(br_handle_frame_hook);
05294 EXPORT_SYMBOL(br_fdb_get_hook);
05295 EXPORT_SYMBOL(br_fdb_put_hook);
05296 #endif
05297
05298 #ifdef CONFIG_KMOD
05299 EXPORT_SYMBOL(dev_load);
05300 #endif
05301
05302 EXPORT_PER_CPU_SYMBOL(softnet_data);