/*
 * fs/char_dev.c - character device number registration and cdev management
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
#include "internal.h"

#ifdef DDE_LINUX
#include "local.h"
#endif

/*
 * Capabilities for /dev/mem, /dev/kmem and other directly mappable
 * character devices: shared mmap for read, write and exec is permitted,
 * and on MMU systems private (copy-on-write) mappings are permitted too.
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
        .capabilities = (
#ifdef CONFIG_MMU
                /* permit private copies of the data to be taken */
                BDI_CAP_MAP_COPY |
#endif
                /* permit direct mmap, for read, write or exec */
                BDI_CAP_MAP_DIRECT |
                BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

static struct char_device_struct {
        struct char_device_struct *next;
        unsigned int major;
        unsigned int baseminor;
        int minorct;
        char name[64];
        struct cdev *cdev;
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index into the chrdevs[] hash table above */
static inline int major_to_index(int major)
{
        return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
        struct char_device_struct *cd;

        if (offset < CHRDEV_MAJOR_HASH_SIZE) {
                mutex_lock(&chrdevs_lock);
                for (cd = chrdevs[offset]; cd; cd = cd->next)
                        seq_printf(f, "%3d %s\n", cd->major, cd->name);
                mutex_unlock(&chrdevs_lock);
        }
}

#endif /* CONFIG_PROC_FS */

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major
 * and use it; otherwise it attempts to reserve the passed range of minors
 * under the given major.
 *
 * Returns the new char_device_struct on success, or an ERR_PTR()-encoded
 * negative errno on failure.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
                         int minorct, const char *name)
{
        struct char_device_struct *cd, **cp;
        int ret = 0;
        int i;

        cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
        if (cd == NULL)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&chrdevs_lock);

        /* pick an unused major if the caller asked for a dynamic one */
        if (major == 0) {
                for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
                        if (chrdevs[i] == NULL)
                                break;
                }

                if (i == 0) {
                        ret = -EBUSY;
                        goto out;
                }
                major = i;
                ret = major;
        }

        cd->major = major;
        cd->baseminor = baseminor;
        cd->minorct = minorct;
        strlcpy(cd->name, name, sizeof(cd->name));

        i = major_to_index(major);

        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major > major ||
                    ((*cp)->major == major &&
                     (((*cp)->baseminor >= baseminor) ||
                      ((*cp)->baseminor + (*cp)->minorct > baseminor))))
                        break;

        /* Check for overlapping minor ranges.  */
        if (*cp && (*cp)->major == major) {
                int old_min = (*cp)->baseminor;
                int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
                int new_min = baseminor;
                int new_max = baseminor + minorct - 1;

                /* New driver overlaps from the left.  */
                if (new_max >= old_min && new_max <= old_max) {
                        ret = -EBUSY;
                        goto out;
                }

                /* New driver overlaps from the right.  */
                if (new_min <= old_max && new_min >= old_min) {
                        ret = -EBUSY;
                        goto out;
                }
        }

        cd->next = *cp;
        *cp = cd;
        mutex_unlock(&chrdevs_lock);
        return cd;
out:
        mutex_unlock(&chrdevs_lock);
        kfree(cd);
        return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
        struct char_device_struct *cd = NULL, **cp;
        int i = major_to_index(major);

        mutex_lock(&chrdevs_lock);
        for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
                if ((*cp)->major == major &&
                    (*cp)->baseminor == baseminor &&
                    (*cp)->minorct == minorct)
                        break;
        if (*cp) {
                cd = *cp;
                *cp = cd->next;
        }
        mutex_unlock(&chrdevs_lock);
        return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
        struct char_device_struct *cd;
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                cd = __register_chrdev_region(MAJOR(n), MINOR(n),
                                              next - n, name);
                if (IS_ERR(cd))
                        goto fail;
        }
        return 0;
fail:
        to = n;
        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
        return PTR_ERR(cd);
}

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for the first assigned device number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
                        const char *name)
{
        struct char_device_struct *cd;
        cd = __register_chrdev_region(0, baseminor, count, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        *dev = MKDEV(cd->major, cd->baseminor);
        return 0;
}
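
/*
 * Usage sketch (illustration only, not part of this file): a minimal module
 * that asks for one dynamically allocated device number and releases it on
 * unload.  The identifiers demo_dev, demo_init and demo_exit are hypothetical
 * names chosen for this example; the example assumes <linux/module.h>,
 * <linux/init.h> and <linux/fs.h> are included.
 *
 *      static dev_t demo_dev;
 *
 *      static int __init demo_init(void)
 *      {
 *              int err = alloc_chrdev_region(&demo_dev, 0, 1, "demo");
 *              if (err)
 *                      return err;
 *              printk(KERN_INFO "demo: major %d minor %d\n",
 *                     MAJOR(demo_dev), MINOR(demo_dev));
 *              return 0;
 *      }
 *
 *      static void __exit demo_exit(void)
 *      {
 *              unregister_chrdev_region(demo_dev, 1);
 *      }
 *
 *      module_init(demo_init);
 *      module_exit(demo_exit);
 */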

/**
 * register_chrdev() - register a major number for character devices
 * @major: major device number or 0 for dynamic allocation
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the
 * given major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev.  It only helps to keep track of the different owners of devices.
 *
 * This function registers a range of 256 minor numbers, starting at minor 0.
 */
int register_chrdev(unsigned int major, const char *name,
                    const struct file_operations *fops)
{
        struct char_device_struct *cd;
        struct cdev *cdev;
        char *s;
        int err = -ENOMEM;

        cd = __register_chrdev_region(major, 0, 256, name);
        if (IS_ERR(cd))
                return PTR_ERR(cd);

        cdev = cdev_alloc();
        if (!cdev)
                goto out2;

        cdev->owner = fops->owner;
        cdev->ops = fops;
        kobject_set_name(&cdev->kobj, "%s", name);
        /* a '/' is not allowed in a kobject name; replace it with '!' */
        for (s = strchr(kobject_name(&cdev->kobj), '/'); s; s = strchr(s, '/'))
                *s = '!';

        err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
        if (err)
                goto out;

        cd->cdev = cdev;

        return major ? 0 : cd->major;
out:
        kobject_put(&cdev->kobj);
out2:
        kfree(__unregister_chrdev_region(cd->major, 0, 256));
        return err;
}
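
/*
 * Usage sketch (illustration only): the legacy interface above claims all
 * 256 minors of one major in a single call and wires them to one set of
 * file operations.  demo_fops and demo_major are hypothetical names.
 *
 *      static int demo_major;
 *
 *      static int __init demo_init(void)
 *      {
 *              demo_major = register_chrdev(0, "demo", &demo_fops);
 *              if (demo_major < 0)
 *                      return demo_major;
 *              return 0;
 *      }
 *
 *      static void __exit demo_exit(void)
 *      {
 *              unregister_chrdev(demo_major, "demo");
 *      }
 *
 * With @major == 0 the return value is the dynamically chosen major (or a
 * negative errno); with a fixed @major the return value is 0 on success.
 */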

/**
 * unregister_chrdev_region() - return a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place.
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
        dev_t to = from + count;
        dev_t n, next;

        for (n = from; n < to; n = next) {
                next = MKDEV(MAJOR(n)+1, 0);
                if (next > to)
                        next = to;
                kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
        }
}

/*
 * Undo register_chrdev(): tear down the cdev created for @major and
 * release the 256-minor region claimed at registration time.
 */
void unregister_chrdev(unsigned int major, const char *name)
{
        struct char_device_struct *cd;
        cd = __unregister_chrdev_region(major, 0, 256);
        if (cd && cd->cdev)
                cdev_del(cd->cdev);
        kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
        struct module *owner = p->owner;
        struct kobject *kobj;

        if (owner && !try_module_get(owner))
                return NULL;
        kobj = kobject_get(&p->kobj);
        if (!kobj)
                module_put(owner);
        return kobj;
}

void cdev_put(struct cdev *p)
{
        if (p) {
                struct module *owner = p->owner;
                kobject_put(&p->kobj);
                module_put(owner);
        }
}

/*
 * Called every time a character special file is opened.
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
        struct cdev *p;
        struct cdev *new = NULL;
        int ret = 0;

        spin_lock(&cdev_lock);
        p = inode->i_cdev;
        if (!p) {
                struct kobject *kobj;
                int idx;
                spin_unlock(&cdev_lock);
                kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
                if (!kobj)
                        return -ENXIO;
                new = container_of(kobj, struct cdev, kobj);
                spin_lock(&cdev_lock);
                /* Check i_cdev again in case somebody beat us to it while
                   we dropped the lock. */
                p = inode->i_cdev;
                if (!p) {
                        inode->i_cdev = p = new;
                        inode->i_cindex = idx;
                        list_add(&inode->i_devices, &p->list);
                        new = NULL;
                } else if (!cdev_get(p))
                        ret = -ENXIO;
        } else if (!cdev_get(p))
                ret = -ENXIO;
        spin_unlock(&cdev_lock);
        cdev_put(new);
        if (ret)
                return ret;

        ret = -ENXIO;
        filp->f_op = fops_get(p->ops);
        if (!filp->f_op)
                goto out_cdev_put;

        if (filp->f_op->open) {
                ret = filp->f_op->open(inode, filp);
                if (ret)
                        goto out_cdev_put;
        }

        return 0;

out_cdev_put:
        cdev_put(p);
        return ret;
}

/* Drop an inode's cached association with its character device. */
void cd_forget(struct inode *inode)
{
        spin_lock(&cdev_lock);
        list_del_init(&inode->i_devices);
        inode->i_cdev = NULL;
        spin_unlock(&cdev_lock);
}

/* Detach every inode that still points at this cdev. */
static void cdev_purge(struct cdev *cdev)
{
        spin_lock(&cdev_lock);
        while (!list_empty(&cdev->list)) {
                struct inode *inode;
                inode = container_of(cdev->list.next, struct inode, i_devices);
                list_del_init(&inode->i_devices);
                inode->i_cdev = NULL;
        }
        spin_unlock(&cdev_lock);
}

/*
 * Dummy default file operations: the only thing this does is contain
 * the open that then fills in the correct operations depending on the
 * device number.
 */
const struct file_operations def_chr_fops = {
        .open = chrdev_open,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
        struct cdev *p = data;
        return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
        struct cdev *p = data;
        return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
        p->dev = dev;
        p->count = count;
        return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
        kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 */
void cdev_del(struct cdev *p)
{
        cdev_unmap(p->dev, p->count);
        kobject_put(&p->kobj);
}

/*
 * A cdev set up with cdev_init() is embedded in a caller-owned structure,
 * so its release only detaches lingering inodes; one obtained from
 * cdev_alloc() is additionally freed here.
 */
static void cdev_default_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
        struct cdev *p = container_of(kobj, struct cdev, kobj);
        cdev_purge(p);
        kfree(p);
}

static struct kobj_type ktype_cdev_default = {
        .release = cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
        .release = cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
        struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
        if (p) {
                INIT_LIST_HEAD(&p->list);
                kobject_init(&p->kobj, &ktype_cdev_dynamic);
        }
        return p;
}

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
        memset(cdev, 0, sizeof *cdev);
        INIT_LIST_HEAD(&cdev->list);
        kobject_init(&cdev->kobj, &ktype_cdev_default);
        cdev->ops = fops;
}
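
/*
 * Usage sketch (illustration only): a driver that embeds a struct cdev in
 * its own per-device structure, the pattern cdev_init() is meant for.  The
 * names demo_device, demo_fops, demo_major and demo_setup are hypothetical.
 *
 *      struct demo_device {
 *              struct cdev cdev;
 *      };
 *
 *      static int demo_setup(struct demo_device *d, int demo_major)
 *      {
 *              cdev_init(&d->cdev, &demo_fops);
 *              d->cdev.owner = THIS_MODULE;
 *              return cdev_add(&d->cdev, MKDEV(demo_major, 0), 1);
 *      }
 *
 * Teardown is the reverse: cdev_del(&d->cdev) removes the device and drops
 * the kobject reference; the embedded cdev must stay valid until then.
 */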

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
        if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
                /* make old-style 2.4 "char-major-N" aliases work */
                request_module("char-major-%d", MAJOR(dev));
        return NULL;
}

void __init chrdev_init(void)
{
        cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
        bdi_init(&directly_mappable_cdev_bdi);
}

#ifndef LIBINPUT
core_initcall(chrdev_init);
#endif

EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);