#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

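/*
 * Queue unplug (work and timer handlers) and request timeout helpers
 * used internally by the block core.
 */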
void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

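/*
 * Fault injection support for forcing request timeouts
 * (CONFIG_FAIL_IO_TIMEOUT); stubbed out when the option is disabled.
 */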
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

struct io_context *current_io_context(gfp_t gfp_flags, int node);

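/*
 * Bio/request merge helpers and segment/sector recalculation used by the
 * block layer's merging logic.
 */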
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

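/*
 * Iterate over every integrity bio_vec of a request: walk each bio in the
 * request, then each vector of that bio's integrity payload.
 */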
#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */
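/*
 * Map a CPU to its completion group: the first CPU of its core group
 * (CONFIG_SCHED_MC), its first SMT sibling (CONFIG_SCHED_SMT), or the
 * CPU itself.
 */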
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return first_cpu(per_cpu(cpu_sibling_map, cpu));
#else
	return cpu;
#endif
}
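/*
 * Report whether I/O statistics accounting is enabled for @q; a NULL
 * queue never contributes to accounting.
 */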
static inline int blk_do_io_stat(struct request_queue *q)
{
	if (q)
		return blk_queue_io_stat(q);

	return 0;
}

#endif /* BLK_INTERNAL_H */