/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

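/*
 * Recompute rq->nr_phys_segments from the request's current bio chain,
 * e.g. after the chain has been modified by a merge.
 */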
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set
 * up.  The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > queue_max_segment_size(q))
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */


	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
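
/*
 * Usage sketch (illustrative only, not part of blk-merge.c): how a block
 * driver typically feeds a request through blk_rq_map_sg() before handing
 * it to the DMA API.  The struct and function names below are hypothetical;
 * only blk_rq_map_sg(), sg_init_table(), dma_map_sg(), queue_max_segments()
 * and rq_data_dir() are real kernel interfaces of this era.
 */
#if 0
#include <linux/dma-mapping.h>

struct example_dev {
	struct device		*dma_dev;
	struct scatterlist	*sglist; /* sized for queue_max_segments(q) */
};

static int example_map_request(struct request_queue *q, struct request *rq,
			       struct example_dev *dev)
{
	int nsegs;

	/* the caller guarantees sglist can hold rq->nr_phys_segments entries */
	sg_init_table(dev->sglist, queue_max_segments(q));
	nsegs = blk_rq_map_sg(q, rq, dev->sglist);

	/* map the coalesced segments for DMA */
	nsegs = dma_map_sg(dev->dma_dev, dev->sglist, nsegs,
			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
						    : DMA_FROM_DEVICE);
	return nsegs ? nsegs : -EIO;
}
#endif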

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

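/*
 * Decide whether @bio may be appended to the back of @req without
 * exceeding the queue's sector and segment limits.
 */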
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
		max_sectors = queue_max_hw_sectors(q);
	else
		max_sectors = queue_max_sectors(q);

	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests has been re-queued;
	 * re-queued requests can't be merged.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

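/*
 * Close out the I/O accounting for a request that is being merged away:
 * round off the partition stats and drop it from the in-flight count.
 */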
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to req and release next.  ll_merge_requests_fn will have updated
	 * the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have done either a back merge or a front merge.
	 * For accounting purposes, the surviving request should carry the
	 * smaller start_time of the two merged requests.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of the bios has passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

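/*
 * Entry point for callers outside the elevator back/front merge paths that
 * already know @rq and @next are candidates for merging.
 */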
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}