00001
00002
00003
00004
00005
00006
00007
00008
00009
00010 #include <linux/kernel.h>
00011 #include <linux/delay.h>
00012 #include <linux/init.h>
00013 #include <linux/pci.h>
00014 #include <linux/pm.h>
00015 #include <linux/module.h>
00016 #include <linux/spinlock.h>
00017 #include <linux/string.h>
00018 #include <linux/log2.h>
00019 #include <linux/pci-aspm.h>
00020 #include <linux/pm_wakeup.h>
00021 #include <linux/interrupt.h>
00022 #include <asm/dma.h>
00023 #include "pci.h"
00024
00025 #ifdef DDE_LINUX
00026 #include "local.h"
00027 #endif
00028
/* Delay (in ms) after a D3hot transition before the device may be touched. */
unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

/* Default window sizes reserved behind a CardBus bridge. */
#define DEFAULT_CARDBUS_IO_SIZE (256)
#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)

/* NOTE(review): presumably overridable via boot parameters — confirm. */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
00040
00041
00042
00043
00044
00045
00046
00047
00048 unsigned char pci_bus_max_busnr(struct pci_bus* bus)
00049 {
00050 struct list_head *tmp;
00051 unsigned char max, n;
00052
00053 max = bus->subordinate;
00054 list_for_each(tmp, &bus->children) {
00055 n = pci_bus_max_busnr(pci_bus_b(tmp));
00056 if(n > max)
00057 max = n;
00058 }
00059 return max;
00060 }
00061 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
00062
00063 #ifdef CONFIG_HAS_IOMEM
00064 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
00065 {
00066
00067
00068
00069 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
00070 WARN_ON(1);
00071 return NULL;
00072 }
00073 return ioremap_nocache(pci_resource_start(pdev, bar),
00074 pci_resource_len(pdev, bar));
00075 }
00076 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
00077 #endif
00078
00079 #if 0
00080
00081
00082
00083
00084
00085
/*
 * pci_max_busnr - returns the highest PCI bus number known to the system
 *
 * NOTE: this function is inside an '#if 0' block and is currently
 * compiled out; kept for reference only.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	/* Walk every bus and track the largest maximum bus number seen. */
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if(n > max)
			max = n;
	}
	return max;
}
00100
00101 #endif
00102
00103 #define PCI_FIND_CAP_TTL 48
00104
/*
 * Walk the capability list starting at config offset @pos, looking for
 * capability ID @cap.  @ttl bounds the number of list entries followed so
 * a corrupt (cyclic) capability list cannot hang the kernel.
 *
 * Returns the config-space offset of the matching capability, or 0.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		/* @pos is the offset of a "next capability" pointer byte. */
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;		/* pointers below 0x40 terminate the list */
		pos &= ~3;		/* capability structures are dword aligned */
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;		/* 0xff: no device / broken list */
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}
00125
00126 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
00127 u8 pos, int cap)
00128 {
00129 int ttl = PCI_FIND_CAP_TTL;
00130
00131 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
00132 }
00133
00134 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
00135 {
00136 return __pci_find_next_cap(dev->bus, dev->devfn,
00137 pos + PCI_CAP_LIST_NEXT, cap);
00138 }
00139 EXPORT_SYMBOL_GPL(pci_find_next_capability);
00140
00141 static int __pci_bus_find_cap_start(struct pci_bus *bus,
00142 unsigned int devfn, u8 hdr_type)
00143 {
00144 u16 status;
00145
00146 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
00147 if (!(status & PCI_STATUS_CAP_LIST))
00148 return 0;
00149
00150 switch (hdr_type) {
00151 case PCI_HEADER_TYPE_NORMAL:
00152 case PCI_HEADER_TYPE_BRIDGE:
00153 return PCI_CAPABILITY_LIST;
00154 case PCI_HEADER_TYPE_CARDBUS:
00155 return PCI_CB_CAPABILITY_LIST;
00156 default:
00157 return 0;
00158 }
00159
00160 return 0;
00161 }
00162
00163
00164
00165
00166
00167
00168
00169
00170
00171
00172
00173
00174
00175
00176
00177
00178
00179
00180
00181
00182 int pci_find_capability(struct pci_dev *dev, int cap)
00183 {
00184 int pos;
00185
00186 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
00187 if (pos)
00188 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
00189
00190 return pos;
00191 }
00192
00193
00194
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
00207 {
00208 int pos;
00209 u8 hdr_type;
00210
00211 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
00212
00213 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
00214 if (pos)
00215 pos = __pci_find_next_cap(bus, devfn, pos, cap);
00216
00217 return pos;
00218 }
00219
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
/**
 * pci_find_ext_capability - find an extended capability
 * @dev: PCI device to query
 * @cap: extended capability code (PCI_EXT_CAP_ID_*)
 *
 * Returns the offset of the requested extended capability structure
 * within the device's PCI Express extended configuration space, or 0
 * if the capability is not present.
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* Minimum 8 bytes per capability bounds the number of entries. */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * An empty capability list is indicated by cap ID, version and
	 * next pointer all being zero.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;	/* pointer left extended space: corrupt list */

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
00271
/*
 * Find the next HyperTransport capability of subtype @ht_cap at or after
 * config offset @pos.  HT capabilities share PCI_CAP_ID_HT and are told
 * apart by a subtype field at offset 3: host/slave subtypes use a 3-bit
 * encoding, all others a 5-bit one.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* Wrong subtype: continue from the next HT capability.  The
		 * shared @ttl keeps the overall walk bounded. */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
00299
00300
00301
00302
00303
00304
00305
00306
00307
00308
00309
00310
00311
/**
 * pci_find_next_ht_capability - continue an HT capability search past @pos
 * @dev: PCI device to query
 * @pos: config offset of the current HT capability
 * @ht_cap: HyperTransport capability subtype (HT_CAPTYPE_*)
 *
 * Returns the offset of the next matching HT capability, or 0 if none.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
00317
00318
00319
00320
00321
00322
00323
00324
00325
00326
00327
00328
00329 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
00330 {
00331 int pos;
00332
00333 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
00334 if (pos)
00335 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
00336
00337 return pos;
00338 }
00339 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
00340
00341
00342
00343
00344
00345
00346
00347
00348
00349
/**
 * pci_find_parent_resource - return the bus resource region containing @res
 * @dev: PCI device whose parent bus is searched
 * @res: child resource record for which a parent window is sought
 *
 * Prefers a window with matching prefetchability; a prefetchable child
 * may fall back to a non-prefetchable parent window, never the reverse.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL;

	for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;
		/* If the child is assigned, it must fit inside the window. */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;
		/* I/O vs memory type must match. */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;
		/* Exact prefetchability match wins immediately. */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;
		/* Prefetchable child in non-prefetch window: usable fallback. */
		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
			best = r;
	}
	return best;
}
00372
00373
00374
00375
00376
00377
00378
00379
00380 static void
00381 pci_restore_bars(struct pci_dev *dev)
00382 {
00383 int i;
00384
00385 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
00386 pci_update_resource(dev, i);
00387 }
00388
/* Platform firmware (e.g. ACPI) power-management hooks, if registered. */
static struct pci_platform_pm_ops *pci_platform_pm;

/*
 * pci_set_platform_pm - register platform PM callbacks
 * @ops: callback table; all five operations are mandatory.
 *
 * Returns 0 on success, -EINVAL if any callback is missing.
 */
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}
00399
00400 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
00401 {
00402 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
00403 }
00404
00405 static inline int platform_pci_set_power_state(struct pci_dev *dev,
00406 pci_power_t t)
00407 {
00408 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
00409 }
00410
00411 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
00412 {
00413 return pci_platform_pm ?
00414 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
00415 }
00416
00417 static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
00418 {
00419 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
00420 }
00421
00422 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
00423 {
00424 return pci_platform_pm ?
00425 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
00426 }
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439
00440
00441
/**
 * pci_raw_set_power_state - set device power state via the PCI PM registers
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 * @wait: if 'true', apply the spec-mandated transition delay, update
 *        dev->current_state and notify ASPM before returning.
 *
 * RETURN VALUE:
 * -EIO if the device has no PM capability or does not support the
 * requested D1/D2 state; -EINVAL for an invalid or backwards transition;
 * 0 if the device already is in the requested state or the transition
 * was carried out successfully.
 */
static int
pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
{
	u16 pmcsr;
	bool need_restore = false;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Only transitions to D0 may come "up" from a deeper state;
	 * e.g. D3hot -> D1 is rejected.
	 */
	if (dev->current_state == state) {
		/* we're already there */
		return 0;
	} else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* D1 and D2 are optional; refuse them if unsupported. */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		/* Known state: just replace the PowerState field. */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_UNKNOWN: /* boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
			/* Leaving D3hot without NO_SOFT_RESET may reset the
			 * device, so its BARs must be restored afterwards. */
			need_restore = true;
			wait = true;
		}
		/* fall through: force the whole word to 0, i.e. D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter the requested state. */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	if (!wait)
		return 0;

	/* Mandatory PM transition delays: 10 ms around D3hot, 200 us
	 * around D2 (see the PCI PM specification). */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		msleep(pci_pm_d3_delay);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	dev->current_state = state;

	/*
	 * A device that was soft-reset on the way out of D3hot has lost
	 * its BAR programming; restore it before anyone uses the device.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (wait && dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
00533
00534
00535
00536
00537
00538
00539
00540 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
00541 {
00542 if (dev->pm_cap) {
00543 u16 pmcsr;
00544
00545 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
00546 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
00547 } else {
00548 dev->current_state = state;
00549 }
00550 }
00551
00552
00553
00554
00555
00556
00557
00558
00559
00560
00561
00562
00563
00564
00565
00566
/**
 * pci_set_power_state - set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transitions the device using both the native PCI PM registers and,
 * when available, the platform (firmware) hooks.
 *
 * RETURN VALUE:
 * 0 on success (including "already in the requested state" and quirked
 * no-D3 devices); a negative error code from pci_raw_set_power_state()
 * otherwise, unless the platform completed the transition itself.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the requested state to what this interface can express. */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * D1/D2 are quirked off for this device (or its bridge);
		 * silently ignore the request.
		 */
		return 0;

	if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
		/*
		 * Let the platform act first, e.g. to bring the device out
		 * of D3cold, before the native transition below.
		 */
		int ret = platform_pci_set_power_state(dev, PCI_D0);
		if (!ret)
			pci_update_current_state(dev, PCI_D0);
	}

	/* Quirked devices must never be put into D3. */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state, true);

	if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
		/* Let the platform finalize (and possibly rescue) the
		 * transition into a low-power state. */
		int ret = platform_pci_set_power_state(dev, state);
		if (!ret) {
			pci_update_current_state(dev, state);
			error = 0;
		}
	}

	return error;
}
00611
00612
00613
00614
00615
00616
00617
00618
00619
00620
00621
/**
 * pci_choose_state - choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system
 *
 * Returns a PCI power state suitable for the given device and system
 * suspend message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* Without the PM capability the device can only stay in D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	/* Prefer the platform firmware's choice when it has one. */
	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* fall through to D3hot, like suspend/hibernate */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	/* Reached only if BUG() is compiled out. */
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
00651
00652 static int pci_save_pcie_state(struct pci_dev *dev)
00653 {
00654 int pos, i = 0;
00655 struct pci_cap_saved_state *save_state;
00656 u16 *cap;
00657
00658 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
00659 if (pos <= 0)
00660 return 0;
00661
00662 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
00663 if (!save_state) {
00664 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
00665 return -ENOMEM;
00666 }
00667 cap = (u16 *)&save_state->data[0];
00668
00669 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
00670 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
00671 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
00672 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
00673
00674 return 0;
00675 }
00676
00677 static void pci_restore_pcie_state(struct pci_dev *dev)
00678 {
00679 int i = 0, pos;
00680 struct pci_cap_saved_state *save_state;
00681 u16 *cap;
00682
00683 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
00684 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
00685 if (!save_state || pos <= 0)
00686 return;
00687 cap = (u16 *)&save_state->data[0];
00688
00689 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
00690 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
00691 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
00692 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
00693 }
00694
00695
00696 static int pci_save_pcix_state(struct pci_dev *dev)
00697 {
00698 int pos;
00699 struct pci_cap_saved_state *save_state;
00700
00701 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
00702 if (pos <= 0)
00703 return 0;
00704
00705 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
00706 if (!save_state) {
00707 dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
00708 return -ENOMEM;
00709 }
00710
00711 pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
00712
00713 return 0;
00714 }
00715
00716 static void pci_restore_pcix_state(struct pci_dev *dev)
00717 {
00718 int i = 0, pos;
00719 struct pci_cap_saved_state *save_state;
00720 u16 *cap;
00721
00722 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
00723 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
00724 if (!save_state || pos <= 0)
00725 return;
00726 cap = (u16 *)&save_state->data[0];
00727
00728 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
00729 }
00730
00731
00732
00733
00734
00735
00736 int
00737 pci_save_state(struct pci_dev *dev)
00738 {
00739 int i;
00740
00741 for (i = 0; i < 16; i++)
00742 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
00743 dev->state_saved = true;
00744 if ((i = pci_save_pcie_state(dev)) != 0)
00745 return i;
00746 if ((i = pci_save_pcix_state(dev)) != 0)
00747 return i;
00748 return 0;
00749 }
00750
00751
00752
00753
00754
/**
 * pci_restore_state - restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	/* PCI Express registers must be restored first. */
	pci_restore_pcie_state(dev);

	/*
	 * Restore the header in reverse dword order so the BARs are
	 * programmed before the command register re-enables decoding.
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev,i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	return 0;
}
00783
00784 static int do_pci_enable_device(struct pci_dev *dev, int bars)
00785 {
00786 int err;
00787
00788 err = pci_set_power_state(dev, PCI_D0);
00789 if (err < 0 && err != -EIO)
00790 return err;
00791 err = pcibios_enable_device(dev, bars);
00792 if (err < 0)
00793 return err;
00794 pci_fixup_device(pci_fixup_enable, dev);
00795
00796 return 0;
00797 }
00798
00799
00800
00801
00802
00803
00804
00805
00806 int pci_reenable_device(struct pci_dev *dev)
00807 {
00808 if (atomic_read(&dev->enable_cnt))
00809 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
00810 return 0;
00811 }
00812
/*
 * Enable the device's BARs whose resource flags intersect @flags
 * (IORESOURCE_IO and/or IORESOURCE_MEM).  Enabling is reference-counted:
 * only the first caller does the real work.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/* Only the 0 -> 1 transition actually enables the device. */
	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;

	/* Build a bitmask of the BARs matching the requested flags. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);	/* roll back on failure */
	return err;
}
00831
00832
00833
00834
00835
00836
00837
00838
00839
/**
 * pci_enable_device_io - initialize a device for use with I/O space
 * @dev: PCI device to be initialized
 *
 * Enables only the device's I/O resources, waking it if it was
 * suspended.  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}
00844
00845
00846
00847
00848
00849
00850
00851
00852
/**
 * pci_enable_device_mem - initialize a device for use with memory space
 * @dev: PCI device to be initialized
 *
 * Enables only the device's memory resources, waking it if it was
 * suspended.  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
00857
00858
00859 #ifndef DDE_LINUX
00860
00861
00862
00863
00864
00865
00866
00867
00868
00869
00870
/**
 * pci_enable_device - initialize device before it's used by a driver
 * @dev: PCI device to be initialized
 *
 * Enables both I/O and memory resources, waking the device if it was
 * suspended.  Repeated calls only increment the enable count.
 * Beware, this function can fail.
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
00875 #endif
00876
00877
00878
00879
00880
00881
00882
/*
 * Managed PCI resource bookkeeping: everything recorded here is undone
 * by pcim_release() when the driver detaches.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* leave enabled on release */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* BARs requested through the managed API */
};
00890
/* devres release callback: undo everything tracked in struct pci_devres. */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	/* Release each BAR region that was requested via the managed API. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	/* Pinned devices stay enabled past driver detach. */
	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
00912
00913 static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
00914 {
00915 struct pci_devres *dr, *new_dr;
00916
00917 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
00918 if (dr)
00919 return dr;
00920
00921 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
00922 if (!new_dr)
00923 return NULL;
00924 return devres_get(&pdev->dev, new_dr, NULL, NULL);
00925 }
00926
00927 static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
00928 {
00929 if (pci_is_managed(pdev))
00930 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
00931 return NULL;
00932 }
00933
00934
00935
00936
00937
00938
00939
00940 int pcim_enable_device(struct pci_dev *pdev)
00941 {
00942 struct pci_devres *dr;
00943 int rc;
00944
00945 dr = get_pci_dr(pdev);
00946 if (unlikely(!dr))
00947 return -ENOMEM;
00948 if (dr->enabled)
00949 return 0;
00950
00951 rc = pci_enable_device(pdev);
00952 if (!rc) {
00953 pdev->is_managed = 1;
00954 dr->enabled = 1;
00955 }
00956 return rc;
00957 }
00958
00959
00960
00961
00962
00963
00964
00965
00966
00967 void pcim_pin_device(struct pci_dev *pdev)
00968 {
00969 struct pci_devres *dr;
00970
00971 dr = find_pci_dr(pdev);
00972 WARN_ON(!dr || !dr->enabled);
00973 if (dr)
00974 dr->pinned = 1;
00975 }
00976
00977 #ifndef DDE_LINUX
00978
00979
00980
00981
00982
00983
00984
00985
/* Default no-op; architectures may override this weak hook. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
00987
00988 static void do_pci_disable_device(struct pci_dev *dev)
00989 {
00990 u16 pci_command;
00991
00992 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
00993 if (pci_command & PCI_COMMAND_MASTER) {
00994 pci_command &= ~PCI_COMMAND_MASTER;
00995 pci_write_config_word(dev, PCI_COMMAND, pci_command);
00996 }
00997
00998 pcibios_disable_device(dev);
00999 }
01000
01001
01002
01003
01004
01005
01006
01007
/**
 * pci_disable_enabled_device - disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: backend for PCI power management routines; not supposed to be
 * called by normal code.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (atomic_read(&dev->enable_cnt))
		do_pci_disable_device(dev);
}
01013
01014
01015
01016
01017
01018
01019
01020
01021
01022
01023
/**
 * pci_disable_device - disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Balances pci_enable_device(): the device is only really disabled
 * (bus mastering cleared, arch hook called) when the last enabler
 * calls this.
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	/* Keep the managed-resource bookkeeping in sync. */
	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* Only the final disable (enable_cnt reaching 0) does real work. */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
01040
01041
01042
01043
01044
01045
01046
01047
01048
01049
/*
 * Default (weak) implementation: architectures that can reset PCIe
 * devices override this; otherwise the operation is unsupported.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}
01055 #endif
01056
01057
01058
01059
01060
01061
01062
01063
01064
/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device to reset
 * @state: reset state to enter into
 *
 * Simply forwards to the architecture hook.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
01069
01070
01071
01072
01073
01074
01075 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
01076 {
01077 if (!dev->pm_cap)
01078 return false;
01079
01080 return !!(dev->pme_support & (1 << state));
01081 }
01082
01083
01084
01085
01086
01087
01088
01089
01090
01091 void pci_pme_active(struct pci_dev *dev, bool enable)
01092 {
01093 u16 pmcsr;
01094
01095 if (!dev->pm_cap)
01096 return;
01097
01098 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
01099
01100 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
01101 if (!enable)
01102 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
01103
01104 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
01105
01106 dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
01107 enable ? "enabled" : "disabled");
01108 }
01109
01110
01111
01112
01113
01114
01115
01116
01117
01118
01119
01120
01121
01122
01123
01124
01125
01126
01127
01128
/**
 * pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @enable: true to enable event generation; false to disable
 *
 * Enables or disables the device as a wakeup event source, invoking
 * the platform hooks where present.
 *
 * RETURN VALUE:
 * 0 on success; -EINVAL if the device is not supposed to wake up the
 * system; a platform-specific error if both the platform and the native
 * PME# mechanism fail.
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
	int error = 0;
	bool pme_done = false;

	if (enable && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/*
	 * Ordering: when enabling, native PME# is armed before the platform
	 * hook; when disabling, the platform hook runs first (symmetric
	 * tear-down of the enable sequence).
	 */
	if (!enable && platform_pci_can_wakeup(dev))
		error = platform_pci_sleep_wake(dev, false);

	if (!enable || pci_pme_capable(dev, state)) {
		pci_pme_active(dev, enable);
		pme_done = true;
	}

	if (enable && platform_pci_can_wakeup(dev))
		error = platform_pci_sleep_wake(dev, true);

	/* Native PME# success overrides any platform-hook failure. */
	return pme_done ? 0 : error;
}
01156
01157
01158
01159
01160
01161
01162
01163
01164
01165
01166
01167
01168
01169
01170
01171 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
01172 {
01173 return pci_pme_capable(dev, PCI_D3cold) ?
01174 pci_enable_wake(dev, PCI_D3cold, enable) :
01175 pci_enable_wake(dev, PCI_D3hot, enable);
01176 }
01177
01178
01179
01180
01181
01182
01183
01184
01185
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Uses the platform, when available, to pick a supported low-power state;
 * otherwise falls back to the deepest state (default D3hot) the device
 * can generate wake events from.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/* Let the platform firmware choose the target state. */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;	/* keep the D3hot default */
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;	/* D1/D2 quirked off: keep default */
			/* fall through: D1/D2 are acceptable */
		default:
			target_state = state;
		}
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Without platform support, pick the deepest state the
		 * device can still generate wake-up (PME#) events from.
		 */
		if (!dev->pm_cap)
			return PCI_POWER_ERROR;

		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
01226
01227
01228
01229
01230
01231
01232
01233
01234
01235 int pci_prepare_to_sleep(struct pci_dev *dev)
01236 {
01237 pci_power_t target_state = pci_target_state(dev);
01238 int error;
01239
01240 if (target_state == PCI_POWER_ERROR)
01241 return -EIO;
01242
01243 pci_enable_wake(dev, target_state, true);
01244
01245 error = pci_set_power_state(dev, target_state);
01246
01247 if (error)
01248 pci_enable_wake(dev, target_state, false);
01249
01250 return error;
01251 }
01252
01253
01254
01255
01256
01257
01258
01259 int pci_back_from_sleep(struct pci_dev *dev)
01260 {
01261 pci_enable_wake(dev, PCI_D0, false);
01262 return pci_set_power_state(dev, PCI_D0);
01263 }
01264
01265
01266
01267
01268
/**
 * pci_pm_init - initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Locates and validates the PCI PM capability and caches the device's
 * D1/D2 and PME# support in @dev.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;

	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	/* Only PM capability register versions up to 3 are understood. */
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		/*
		 * Mark the device wake-capable, but leave it to user space
		 * to actually enable waking the system.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		device_set_wakeup_enable(&dev->dev, false);
		/* Make sure PME# generation starts out disabled. */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
01326
01327
01328
01329
01330
01331
01332
01333
01334
01335
01336
01337 void platform_pci_wakeup_init(struct pci_dev *dev)
01338 {
01339 if (!platform_pci_can_wakeup(dev))
01340 return;
01341
01342 device_set_wakeup_capable(&dev->dev, true);
01343 device_set_wakeup_enable(&dev->dev, false);
01344 platform_pci_sleep_wake(dev, false);
01345 }
01346
01347
01348
01349
01350
01351
01352
01353
01354 static int pci_add_cap_save_buffer(
01355 struct pci_dev *dev, char cap, unsigned int size)
01356 {
01357 int pos;
01358 struct pci_cap_saved_state *save_state;
01359
01360 pos = pci_find_capability(dev, cap);
01361 if (pos <= 0)
01362 return 0;
01363
01364 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
01365 if (!save_state)
01366 return -ENOMEM;
01367
01368 save_state->cap_nr = cap;
01369 pci_add_saved_cap(dev, save_state);
01370
01371 return 0;
01372 }
01373
01374
01375
01376
01377
01378 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
01379 {
01380 int error;
01381
01382 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16));
01383 if (error)
01384 dev_err(&dev->dev,
01385 "unable to preallocate PCI Express save buffer\n");
01386
01387 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
01388 if (error)
01389 dev_err(&dev->dev,
01390 "unable to preallocate PCI-X save buffer\n");
01391 }
01392
01393
01394
01395
01396
01397
01398
01399
01400
/**
 * pci_restore_standard_config - restore a device's standard config registers
 * @dev: PCI device to handle
 *
 * Assumes the device's configuration space is accessible.  If the device
 * needs to be powered up, this waits for the transition to complete
 * before restoring the saved state.
 */
int pci_restore_standard_config(struct pci_dev *dev)
{
	pci_power_t prev_state;
	int error;

	pci_update_current_state(dev, PCI_D0);

	prev_state = dev->current_state;
	if (prev_state == PCI_D0)
		goto Restore;

	/* wait == false: we insert the transition delay ourselves below. */
	error = pci_raw_set_power_state(dev, PCI_D0, false);
	if (error)
		return error;

	/*
	 * Apply the mandatory PCI PM transition delay for the state we
	 * are leaving before touching the device again.
	 */
	switch(prev_state) {
	case PCI_D3cold:
	case PCI_D3hot:
		mdelay(pci_pm_d3_delay);
		break;
	case PCI_D2:
		udelay(PCI_PM_D2_DELAY);
		break;
	}

	pci_update_current_state(dev, PCI_D0);

 Restore:
	return dev->state_saved ? pci_restore_state(dev) : 0;
}
01436
01437
01438
01439
01440
/**
 * pci_enable_ari - enable ARI forwarding if the hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	/* Only function 0 of a PCIe device triggers ARI enablement. */
	if (!dev->is_pcie || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	/* ARI forwarding is enabled on the upstream (bridge) port. */
	bridge = dev->bus->self;
	if (!bridge || !bridge->is_pcie)
		return;

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	/* The bridge must advertise ARI in Device Capabilities 2. */
	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
01473
01474
01475
01476
01477
01478
01479
01480
01481
01482
/**
 * pci_swizzle_interrupt_pin - swizzle INTx pin for a device behind a bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Standard PCI-to-PCI bridge swizzle: the pin seen on the parent bus
 * rotates with the device's slot number.
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
}
01487
01488 int
01489 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
01490 {
01491 u8 pin;
01492
01493 pin = dev->pin;
01494 if (!pin)
01495 return -1;
01496
01497 while (dev->bus->self) {
01498 pin = pci_swizzle_interrupt_pin(dev, pin);
01499 dev = dev->bus->self;
01500 }
01501 *bridge = dev;
01502 return pin;
01503 }
01504
01505
01506
01507
01508
01509
01510
01511
01512
01513 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
01514 {
01515 u8 pin = *pinp;
01516
01517 while (dev->bus->self) {
01518 pin = pci_swizzle_interrupt_pin(dev, pin);
01519 dev = dev->bus->self;
01520 }
01521 *pinp = pin;
01522 return PCI_SLOT(dev->devfn);
01523 }
01524
01525
01526
01527
01528
01529
01530
01531
01532
01533
/**
 * pci_release_region - release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by
 *        pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O or memory resource previously reserved.  Call
 * only after all use of the region has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	/* Zero-length BARs were never reserved. */
	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	/* Keep the managed-resource bookkeeping in sync. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
01551
01552
01553
01554
01555
01556
01557
01558
01559
01560
01561
01562
01563
01564
01565
01566
01567
01568
01569
01570
/**
 * __pci_request_region - reserve a PCI I/O or memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: name to be associated with the resource
 * @exclusive: IORESOURCE_EXCLUSIVE for exclusive access, 0 otherwise
 *
 * Returns 0 on success, -EBUSY (with a warning) on error.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	/* Zero-length BARs are absent; treat as trivially reserved. */
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the BAR for automatic release on managed devices. */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
		 bar,
		 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
		 &pdev->resource[bar]);
	return -EBUSY;
}
01604
01605
01606
01607
01608
01609
01610
01611
01612
01613
01614
01615
01616
01617
01618
/**
 * pci_request_region - Reserve a PCI I/O or memory resource (shared)
 * @pdev: PCI device whose resource is being reserved
 * @bar: BAR to reserve
 * @res_name: name to associate with the reservation
 *
 * Non-exclusive variant: userspace may still map the region.
 * Returns 0 on success or -EBUSY if the region is already claimed.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
01623
01624
01625
01626
01627
01628
01629
01630
01631
01632
01633
01634
01635
01636
01637
01638
01639
01640
01641
/**
 * pci_request_region_exclusive - Reserve a BAR exclusively
 * @pdev: PCI device whose resource is being reserved
 * @bar: BAR to reserve
 * @res_name: name to associate with the reservation
 *
 * Like pci_request_region() but passes IORESOURCE_EXCLUSIVE so userspace
 * is not allowed to map the region.  Returns 0 or -EBUSY.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
01646
01647
01648
01649
01650
01651
01652
01653
/**
 * pci_release_selected_regions - Release selected BAR reservations
 * @pdev: PCI device whose resources were reserved
 * @bars: bitmask of BARs to release
 *
 * Releases every standard BAR (0-5) whose bit is set in @bars.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int bar;

	/* Only the six standard BARs are covered by the mask. */
	for (bar = 0; bar < 6; bar++) {
		if (bars & (1 << bar))
			pci_release_region(pdev, bar);
	}
}
01662
01663 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
01664 const char *res_name, int excl)
01665 {
01666 int i;
01667
01668 for (i = 0; i < 6; i++)
01669 if (bars & (1 << i))
01670 if (__pci_request_region(pdev, i, res_name, excl))
01671 goto err_out;
01672 return 0;
01673
01674 err_out:
01675 while(--i >= 0)
01676 if (bars & (1 << i))
01677 pci_release_region(pdev, i);
01678
01679 return -EBUSY;
01680 }
01681
01682
01683
01684
01685
01686
01687
01688
/**
 * pci_request_selected_regions - Reserve selected BARs (shared)
 * @pdev: PCI device
 * @bars: bitmask of BARs to reserve
 * @res_name: name to associate with the reservations
 *
 * Returns 0 on success or -EBUSY; on failure nothing stays reserved.
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
01694
/**
 * pci_request_selected_regions_exclusive - Reserve selected BARs exclusively
 * @pdev: PCI device
 * @bars: bitmask of BARs to reserve
 * @res_name: name to associate with the reservations
 *
 * Exclusive variant: userspace may not map the reserved regions.
 * Returns 0 on success or -EBUSY; on failure nothing stays reserved.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
		int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
01701
01702
01703
01704
01705
01706
01707
01708
01709
01710
/**
 * pci_release_regions - Release all six standard BAR reservations
 * @pdev: PCI device whose resources were reserved
 *
 * Releases every region previously claimed by pci_request_regions().
 */
void pci_release_regions(struct pci_dev *pdev)
{
	/* (1 << 6) - 1 == mask covering the six standard BARs */
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
01715
01716
01717
01718
01719
01720
01721
01722
01723
01724
01725
01726
01727
01728
/**
 * pci_request_regions - Reserve all six standard BARs
 * @pdev: PCI device
 * @res_name: name to associate with the reservations
 *
 * Returns 0 on success or -EBUSY; on failure nothing stays reserved.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	/* (1 << 6) - 1 == mask covering the six standard BARs */
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
01733
01734 #ifndef DDE_LINUX
01735
01736
01737
01738
01739
01740
01741
01742
01743
01744
01745
01746
01747
01748
01749
01750
/**
 * pci_request_regions_exclusive - Reserve all six standard BARs exclusively
 * @pdev: PCI device
 * @res_name: name to associate with the reservations
 *
 * Exclusive variant of pci_request_regions(): userspace may not map the
 * reserved regions.  Returns 0 on success or -EBUSY.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
01756
01757 static void __pci_set_master(struct pci_dev *dev, bool enable)
01758 {
01759 u16 old_cmd, cmd;
01760
01761 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
01762 if (enable)
01763 cmd = old_cmd | PCI_COMMAND_MASTER;
01764 else
01765 cmd = old_cmd & ~PCI_COMMAND_MASTER;
01766 if (cmd != old_cmd) {
01767 dev_dbg(&dev->dev, "%s bus mastering\n",
01768 enable ? "enabling" : "disabling");
01769 pci_write_config_word(dev, PCI_COMMAND, cmd);
01770 }
01771 dev->is_busmaster = enable;
01772 }
01773
01774
01775
01776
01777
01778
01779
01780
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Sets the bus-master bit and then lets the architecture perform any
 * additional setup (e.g. latency timer fixups) via pcibios_set_master().
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}
01786
01787
01788
01789
01790
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
01795 #endif
01796
01797 #ifdef PCI_DISABLE_MWI
/* Stub used when the arch defines PCI_DISABLE_MWI: MWI is never enabled,
 * but report success so drivers need no special-casing. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}
01802
/* Stub for PCI_DISABLE_MWI builds: best-effort MWI enable is a no-op. */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}
01807
/* Stub for PCI_DISABLE_MWI builds: nothing to clear. */
void pci_clear_mwi(struct pci_dev *dev)
{
}
01811
01812 #else
01813
01814 #ifndef PCI_CACHE_LINE_BYTES
01815 #define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
01816 #endif
01817
01818
01819
01820 u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
01821
01822
01823
01824
01825
01826
01827
01828
01829
01830
01831
/*
 * pci_set_cacheline_size - ensure the device's cache line size register
 * is compatible with the host cache line size.
 * @dev: the PCI device
 *
 * Returns 0 when the register already holds (a multiple of) the host
 * value or when the device accepted the written value; -EINVAL when the
 * host size is unknown or the device refused the write (register reads
 * back different, i.e. it is read-only or restricted).
 */
static int
pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the device can only operate correctly
	   if it is a multiple of the host cache line size. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value, then read it back to verify the device
	   actually latched it (the register may be read-only). */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);

	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	/* Register value is in dwords; << 2 converts to bytes for the log. */
	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
01859
01860
01861
01862
01863
01864
01865
01866
01867
01868 int
01869 pci_set_mwi(struct pci_dev *dev)
01870 {
01871 int rc;
01872 u16 cmd;
01873
01874 rc = pci_set_cacheline_size(dev);
01875 if (rc)
01876 return rc;
01877
01878 pci_read_config_word(dev, PCI_COMMAND, &cmd);
01879 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
01880 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
01881 cmd |= PCI_COMMAND_INVALIDATE;
01882 pci_write_config_word(dev, PCI_COMMAND, cmd);
01883 }
01884
01885 return 0;
01886 }
01887
01888
01889
01890
01891
01892
01893
01894
01895
01896
/**
 * pci_try_set_mwi - best-effort MWI enable
 * @dev: the PCI device for which MWI is enabled
 *
 * Same as pci_set_mwi() in this configuration; callers treat failure as
 * non-fatal.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}
01902
01903
01904
01905
01906
01907
01908
01909 void
01910 pci_clear_mwi(struct pci_dev *dev)
01911 {
01912 u16 cmd;
01913
01914 pci_read_config_word(dev, PCI_COMMAND, &cmd);
01915 if (cmd & PCI_COMMAND_INVALIDATE) {
01916 cmd &= ~PCI_COMMAND_INVALIDATE;
01917 pci_write_config_word(dev, PCI_COMMAND, cmd);
01918 }
01919 }
01920 #endif
01921
01922
01923
01924
01925
01926
01927
01928
01929 void
01930 pci_intx(struct pci_dev *pdev, int enable)
01931 {
01932 u16 pci_command, new;
01933
01934 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
01935
01936 if (enable) {
01937 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
01938 } else {
01939 new = pci_command | PCI_COMMAND_INTX_DISABLE;
01940 }
01941
01942 if (new != pci_command) {
01943 struct pci_devres *dr;
01944
01945 pci_write_config_word(pdev, PCI_COMMAND, new);
01946
01947 dr = find_pci_dr(pdev);
01948 if (dr && !dr->restore_intx) {
01949 dr->restore_intx = 1;
01950 dr->orig_intx = !enable;
01951 }
01952 }
01953 }
01954
01955
01956
01957
01958
01959
01960
01961
01962
01963 void pci_msi_off(struct pci_dev *dev)
01964 {
01965 int pos;
01966 u16 control;
01967
01968 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
01969 if (pos) {
01970 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
01971 control &= ~PCI_MSI_FLAGS_ENABLE;
01972 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
01973 }
01974 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
01975 if (pos) {
01976 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
01977 control &= ~PCI_MSIX_FLAGS_ENABLE;
01978 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
01979 }
01980 }
01981
01982 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK
01983
01984
01985
/*
 * pci_set_dma_mask - set the streaming DMA mask for the device
 * @dev: the PCI device
 * @mask: the mask to apply
 *
 * Returns 0 on success, -EIO when the platform cannot support the mask.
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}
01996
/*
 * pci_set_consistent_dma_mask - set the coherent-allocation DMA mask
 * @dev: the PCI device
 * @mask: the mask to apply
 *
 * Returns 0 on success, -EIO when the platform cannot support the mask.
 */
int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dev.coherent_dma_mask = mask;

	return 0;
}
02007 #endif
02008
02009 #ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
/* Thin wrapper: forward the maximum DMA segment size to the DMA core. */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
02014 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
02015 #endif
02016
02017 #ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
/* Thin wrapper: forward the DMA segment boundary mask to the DMA core. */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
02022 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
02023 #endif
02024
/*
 * __pcie_flr - PCIe Function Level Reset
 * @dev: device to reset
 * @probe: if nonzero, only check whether FLR is supported; do not reset
 *
 * Returns -ENOTTY when the device has no PCIe capability or does not
 * advertise FLR support, 0 otherwise.
 */
static int __pcie_flr(struct pci_dev *dev, int probe)
{
	u16 status;
	u32 cap;
	int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);

	if (!exppos)
		return -ENOTTY;
	pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Keep userspace config accesses away while the device resets. */
	pci_block_user_cfg_access(dev);

	/* Wait for outstanding transactions to drain before resetting. */
	msleep(100);
	pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
	if (status & PCI_EXP_DEVSTA_TRPND) {
		dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
			"sleeping for 1 second\n");
		ssleep(1);
		pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
		if (status & PCI_EXP_DEVSTA_TRPND)
			dev_info(&dev->dev, "Still busy after 1s; "
				"proceeding with reset anyway\n");
	}

	/* Initiate the FLR, then allow the mandated recovery time. */
	pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
				PCI_EXP_DEVCTL_BCR_FLR);
	mdelay(100);

	pci_unblock_user_cfg_access(dev);
	return 0;
}
02062
/*
 * __pci_af_flr - Advanced Features Function Level Reset
 * @dev: device to reset
 * @probe: if nonzero, only check whether AF FLR is supported
 *
 * Conventional-PCI counterpart of __pcie_flr() using the AF capability.
 * Returns -ENOTTY when the capability or FLR/TP support is absent,
 * 0 otherwise.
 */
static int __pci_af_flr(struct pci_dev *dev, int probe)
{
	int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
	u8 status;
	u8 cap;

	if (!cappos)
		return -ENOTTY;
	pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
	/* Need both Transactions-Pending reporting and FLR support. */
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Keep userspace config accesses away while the device resets. */
	pci_block_user_cfg_access(dev);

	/* Wait for outstanding transactions to drain before resetting. */
	msleep(100);
	pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
	if (status & PCI_AF_STATUS_TP) {
		dev_info(&dev->dev, "Busy after 100ms while trying to"
			 " reset; sleeping for 1 second\n");
		ssleep(1);
		pci_read_config_byte(dev,
				cappos + PCI_AF_STATUS, &status);
		if (status & PCI_AF_STATUS_TP)
			dev_info(&dev->dev, "Still busy after 1s; "
				 "proceeding with reset anyway\n");
	}
	/* Initiate the FLR, then allow the mandated recovery time. */
	pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	mdelay(100);

	pci_unblock_user_cfg_access(dev);
	return 0;
}
02099
02100 static int __pci_reset_function(struct pci_dev *pdev, int probe)
02101 {
02102 int res;
02103
02104 res = __pcie_flr(pdev, probe);
02105 if (res != -ENOTTY)
02106 return res;
02107
02108 res = __pci_af_flr(pdev, probe);
02109 if (res != -ENOTTY)
02110 return res;
02111
02112 return res;
02113 }
02114
02115
02116
02117
02118
02119
02120
02121
02122
02123
02124
02125
02126
02127
02128
02129
02130
02131
/**
 * pci_execute_reset_function - execute a function-level reset now
 * @dev: device to reset
 *
 * Performs the reset without any save/restore of device state; the
 * caller is responsible for quiescing and restoring the device.
 */
int pci_execute_reset_function(struct pci_dev *dev)
{
	return __pci_reset_function(dev, 0);
}
02136 EXPORT_SYMBOL_GPL(pci_execute_reset_function);
02137
02138
02139
02140
02141
02142
02143
02144
02145
02146
02147
02148
02149
02150
02151
02152
02153
/**
 * pci_reset_function - quiesce, reset, and restore a PCI function
 * @dev: device to reset
 *
 * First probes (probe=1) that a reset method exists; then disables the
 * legacy irq (unless MSI/MSI-X is in use), saves config state, masks
 * INTx, performs the reset, and restores state and the irq.
 * Returns 0 on success or a negative errno.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int r = __pci_reset_function(dev, 1);

	if (r < 0)
		return r;

	/* Quiesce the legacy interrupt while the device is reset. */
	if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
		disable_irq(dev->irq);
	pci_save_state(dev);

	/* Mask INTx so the device cannot assert it during the reset. */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	r = pci_execute_reset_function(dev);

	pci_restore_state(dev);
	if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
		enable_irq(dev->irq);

	return r;
}
02175 EXPORT_SYMBOL_GPL(pci_reset_function);
02176
02177
02178
02179
02180
02181
02182
02183
02184 int pcix_get_max_mmrbc(struct pci_dev *dev)
02185 {
02186 int err, cap;
02187 u32 stat;
02188
02189 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
02190 if (!cap)
02191 return -EINVAL;
02192
02193 err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
02194 if (err)
02195 return -EINVAL;
02196
02197 return (stat & PCI_X_STATUS_MAX_READ) >> 12;
02198 }
02199 EXPORT_SYMBOL(pcix_get_max_mmrbc);
02200
02201
02202
02203
02204
02205
02206
02207
02208 int pcix_get_mmrbc(struct pci_dev *dev)
02209 {
02210 int ret, cap;
02211 u32 cmd;
02212
02213 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
02214 if (!cap)
02215 return -EINVAL;
02216
02217 ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
02218 if (!ret)
02219 ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
02220
02221 return ret;
02222 }
02223 EXPORT_SYMBOL(pcix_get_mmrbc);
02224
02225
02226
02227
02228
02229
02230
02231
02232
02233
02234 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
02235 {
02236 int cap, err = -EINVAL;
02237 u32 stat, cmd, v, o;
02238
02239 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
02240 goto out;
02241
02242 v = ffs(mmrbc) - 10;
02243
02244 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
02245 if (!cap)
02246 goto out;
02247
02248 err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
02249 if (err)
02250 goto out;
02251
02252 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
02253 return -E2BIG;
02254
02255 err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
02256 if (err)
02257 goto out;
02258
02259 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
02260 if (o != v) {
02261 if (v > o && dev->bus &&
02262 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
02263 return -EIO;
02264
02265 cmd &= ~PCI_X_CMD_MAX_READ;
02266 cmd |= v << 2;
02267 err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
02268 }
02269 out:
02270 return err;
02271 }
02272 EXPORT_SYMBOL(pcix_set_mmrbc);
02273
02274
02275
02276
02277
02278
02279
02280
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns the current maximum read request size in bytes (128-4096),
 * -EINVAL when the device has no PCIe capability, or the config-access
 * error code if the Device Control register read fails.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	int ret, cap;
	u16 ctl;

	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!cap)
		return -EINVAL;

	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (!ret)
		/* DEVCTL bits 14:12 encode the size as 128 << field. */
		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return ret;
}
02296 EXPORT_SYMBOL(pcie_get_readrq);
02297
02298
02299
02300
02301
02302
02303
02304
02305
02306 int pcie_set_readrq(struct pci_dev *dev, int rq)
02307 {
02308 int cap, err = -EINVAL;
02309 u16 ctl, v;
02310
02311 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
02312 goto out;
02313
02314 v = (ffs(rq) - 8) << 12;
02315
02316 cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
02317 if (!cap)
02318 goto out;
02319
02320 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
02321 if (err)
02322 goto out;
02323
02324 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
02325 ctl &= ~PCI_EXP_DEVCTL_READRQ;
02326 ctl |= v;
02327 err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl);
02328 }
02329
02330 out:
02331 return err;
02332 }
02333 EXPORT_SYMBOL(pcie_set_readrq);
02334
02335
02336
02337
02338
02339
02340
02341
02342 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
02343 {
02344 int i, bars = 0;
02345 for (i = 0; i < PCI_NUM_RESOURCES; i++)
02346 if (pci_resource_flags(dev, i) & flags)
02347 bars |= (1 << i);
02348 return bars;
02349 }
02350
02351
02352
02353
02354
02355
02356
02357
02358
02359 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
02360 {
02361 if (resno < PCI_ROM_RESOURCE) {
02362 *type = pci_bar_unknown;
02363 return PCI_BASE_ADDRESS_0 + 4 * resno;
02364 } else if (resno == PCI_ROM_RESOURCE) {
02365 *type = pci_bar_mem32;
02366 return dev->rom_base_reg;
02367 }
02368
02369 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
02370 return 0;
02371 }
02372
/* Disable PCI domain support (boot option "pci=nodomains"). */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
02379
02380
02381
02382
02383
02384
02385
02386
02387
/*
 * pci_ext_cfg_avail - whether extended (>256 byte) config space works
 * @dev: the PCI device
 *
 * Weak default: assume extended config space is reachable; an arch may
 * override this to report otherwise.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}
02392
02393 #ifndef DDE_LINUX
02394 static
02395 #endif
/* Apply the "final" stage of fixups to every device found at init time.
 * (Declared static unless built for DDE_LINUX; see the guard above.) */
int __devinit pci_init(void)
{
	struct pci_dev *dev = NULL;

	/* pci_get_device() iterates all devices and manages refcounts. */
	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		pci_fixup_device(pci_fixup_final, dev);
	}

	return 0;
}
02406
/*
 * pci_setup - parse the "pci=" kernel boot parameter
 * @str: comma-separated option list
 *
 * Recognizes nomsi, noaer, nodomains, cbiosize=, cbmemsize=; anything
 * not consumed by pcibios_setup() and not recognized here is reported.
 * Compiled out entirely under DDE_LINUX (no kernel command line).
 */
static int __init pci_setup(char *str)
{
#ifndef DDE_LINUX
	while (str) {
		/* Split off the next comma-separated token. */
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		/* Give the arch a first chance to consume the option. */
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
#endif
	return 0;
}
02435 early_param("pci", pci_setup);
02436
02437 device_initcall(pci_init);
02438
02439 EXPORT_SYMBOL(pci_reenable_device);
02440 EXPORT_SYMBOL(pci_enable_device_io);
02441 EXPORT_SYMBOL(pci_enable_device_mem);
02442 EXPORT_SYMBOL(pci_enable_device);
02443 EXPORT_SYMBOL(pcim_enable_device);
02444 EXPORT_SYMBOL(pcim_pin_device);
02445 EXPORT_SYMBOL(pci_disable_device);
02446 EXPORT_SYMBOL(pci_find_capability);
02447 EXPORT_SYMBOL(pci_bus_find_capability);
02448 EXPORT_SYMBOL(pci_release_regions);
02449 EXPORT_SYMBOL(pci_request_regions);
02450 EXPORT_SYMBOL(pci_request_regions_exclusive);
02451 EXPORT_SYMBOL(pci_release_region);
02452 EXPORT_SYMBOL(pci_request_region);
02453 EXPORT_SYMBOL(pci_request_region_exclusive);
02454 EXPORT_SYMBOL(pci_release_selected_regions);
02455 EXPORT_SYMBOL(pci_request_selected_regions);
02456 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
02457 EXPORT_SYMBOL(pci_set_master);
02458 EXPORT_SYMBOL(pci_clear_master);
02459 EXPORT_SYMBOL(pci_set_mwi);
02460 EXPORT_SYMBOL(pci_try_set_mwi);
02461 EXPORT_SYMBOL(pci_clear_mwi);
02462 EXPORT_SYMBOL_GPL(pci_intx);
02463 EXPORT_SYMBOL(pci_set_dma_mask);
02464 EXPORT_SYMBOL(pci_set_consistent_dma_mask);
02465 EXPORT_SYMBOL(pci_assign_resource);
02466 EXPORT_SYMBOL(pci_find_parent_resource);
02467 EXPORT_SYMBOL(pci_select_bars);
02468
02469 EXPORT_SYMBOL(pci_set_power_state);
02470 EXPORT_SYMBOL(pci_save_state);
02471 EXPORT_SYMBOL(pci_restore_state);
02472 EXPORT_SYMBOL(pci_pme_capable);
02473 EXPORT_SYMBOL(pci_pme_active);
02474 EXPORT_SYMBOL(pci_enable_wake);
02475 EXPORT_SYMBOL(pci_wake_from_d3);
02476 EXPORT_SYMBOL(pci_target_state);
02477 EXPORT_SYMBOL(pci_prepare_to_sleep);
02478 EXPORT_SYMBOL(pci_back_from_sleep);
02479 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
02480