aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThor Thayer <thor.thayer@linux.intel.com>2019-04-04 10:36:37 -0400
committerBorislav Petkov <bp@suse.de>2019-04-10 15:02:49 -0400
commit74676a8e247a2163a25fb0ef98f17b74fbad9d21 (patch)
tree8e9e1110343d37f0a0b4779238aa2931f14ecd28
parent71eec083eef10b1529f2300d6c3553f0949733a5 (diff)
arm64: dts: stratix10: Use new Stratix10 EDAC bindings
Use the new Stratix10 binding format for EDAC nodes. Signed-off-by: Thor Thayer <thor.thayer@linux.intel.com> Signed-off-by: Borislav Petkov <bp@suse.de> Acked-by: Dinh Nguyen <dinguyen@kernel.org> Cc: James Morse <james.morse@arm.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: devicetree@vger.kernel.org Cc: linux-edac <linux-edac@vger.kernel.org> Cc: mark.rutland@arm.com Cc: mchehab@kernel.org Link: https://lkml.kernel.org/r/1554388597-28019-4-git-send-email-thor.thayer@linux.intel.com
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi25
1 files changed, 13 insertions, 12 deletions
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 7c649f6b14cb..e00ad770fa39 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -531,11 +531,12 @@
531 }; 531 };
532 532
533 eccmgr { 533 eccmgr {
534 compatible = "altr,socfpga-a10-ecc-manager"; 534 compatible = "altr,socfpga-s10-ecc-manager",
535 "altr,socfpga-a10-ecc-manager";
535 altr,sysmgr-syscon = <&sysmgr>; 536 altr,sysmgr-syscon = <&sysmgr>;
536 #address-cells = <1>; 537 #address-cells = <1>;
537 #size-cells = <1>; 538 #size-cells = <1>;
538 interrupts = <0 15 4>, <0 95 4>; 539 interrupts = <0 15 4>;
539 interrupt-controller; 540 interrupt-controller;
540 #interrupt-cells = <2>; 541 #interrupt-cells = <2>;
541 ranges; 542 ranges;
@@ -543,31 +544,31 @@
543 sdramedac { 544 sdramedac {
544 compatible = "altr,sdram-edac-s10"; 545 compatible = "altr,sdram-edac-s10";
545 altr,sdr-syscon = <&sdr>; 546 altr,sdr-syscon = <&sdr>;
546 interrupts = <16 4>, <48 4>; 547 interrupts = <16 4>;
547 }; 548 };
548 549
549 usb0-ecc@ff8c4000 { 550 usb0-ecc@ff8c4000 {
550 compatible = "altr,socfpga-usb-ecc"; 551 compatible = "altr,socfpga-s10-usb-ecc",
552 "altr,socfpga-usb-ecc";
551 reg = <0xff8c4000 0x100>; 553 reg = <0xff8c4000 0x100>;
552 altr,ecc-parent = <&usb0>; 554 altr,ecc-parent = <&usb0>;
553 interrupts = <2 4>, 555 interrupts = <2 4>;
554 <34 4>;
555 }; 556 };
556 557
557 emac0-rx-ecc@ff8c0000 { 558 emac0-rx-ecc@ff8c0000 {
558 compatible = "altr,socfpga-eth-mac-ecc"; 559 compatible = "altr,socfpga-s10-eth-mac-ecc",
560 "altr,socfpga-eth-mac-ecc";
559 reg = <0xff8c0000 0x100>; 561 reg = <0xff8c0000 0x100>;
560 altr,ecc-parent = <&gmac0>; 562 altr,ecc-parent = <&gmac0>;
561 interrupts = <4 4>, 563 interrupts = <4 4>;
562 <36 4>;
563 }; 564 };
564 565
565 emac0-tx-ecc@ff8c0400 { 566 emac0-tx-ecc@ff8c0400 {
566 compatible = "altr,socfpga-eth-mac-ecc"; 567 compatible = "altr,socfpga-s10-eth-mac-ecc",
568 "altr,socfpga-eth-mac-ecc";
567 reg = <0xff8c0400 0x100>; 569 reg = <0xff8c0400 0x100>;
568 altr,ecc-parent = <&gmac0>; 570 altr,ecc-parent = <&gmac0>;
569 interrupts = <5 4>, 571 interrupts = <5 4>;
570 <37 4>;
571 }; 572 };
572 573
573 }; 574 };
pre>195 196 197 198 199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300







                                           
                      






                          

                         

                               

































































































                                                                                  
                                              
                                                  























                                                                    
                                 


                     
                                                         
 
                                           

 
                                                             
 

                                               











                                                                           
                                                        
 
                                                  

 
                                                            
 
                                                      

 
                                                                              












                                                        
                                                             




                                 






                                                                                
























                                                                        
                                             
 



































                                                                      
                                                                     



                        

                                           
                                     


                       




                                                                           
                                                       






                                       






                                              
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

/*
 * page_pool backs highmem bouncing (created at boot by
 * init_emergency_pool() when highmem is present); isa_page_pool backs
 * ISA/GFP_DMA bouncing (created lazily by init_emergency_isa_pool()).
 */
static mempool_t *page_pool, *isa_page_pool;

#ifdef CONFIG_HIGHMEM
static __init int init_emergency_pool(void)
{
	struct sysinfo i;
	si_meminfo(&i);
	si_swapinfo(&i);

	if (!i.totalhigh)
		return 0;

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);

/*
 * highmem version, map in to vec
 */
/* Copy @to->bv_len bytes from @vfrom into the (possibly highmem) page
 * described by @to, at @to->bv_offset. */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	/* NOTE(review): interrupts are kept off across the whole atomic
	 * map/copy/unmap sequence — presumably so the KM_BOUNCE_READ
	 * kmap slot cannot be reused by an interrupt on this CPU;
	 * confirm against the kmap_atomic slot rules of this tree. */
	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

/* Non-highmem version: no atomic kmap needed, copy straight through
 * the page's direct address via page_address(). */
#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * Page allocator callback for the ISA pool: same as
 * mempool_alloc_pages(), but with GFP_DMA forced on so the pages come
 * from the ISA-reachable DMA zone.
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	gfp_t isa_mask = gfp_mask | GFP_DMA;

	return mempool_alloc_pages(isa_mask, data);
}

/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing.
 *
 * Copies bounced segments of @from back into the caller's pages in
 * @to; called from the read-completion path (__bounce_end_io_read)
 * once the device has filled the bounce pages.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced: both bios share this page, nothing to copy
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}

/*
 * Common completion for a bounce clone: propagate flags, return every
 * bounce page to @pool, finish the original bio with @err, then drop
 * the clone.
 */
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	/* forward "operation not supported" to the bio the caller sees */
	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		/* shared page => this segment was never bounced */
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}

/* Write completion (highmem pool): no copy-back needed, just release
 * the bounce pages. */
static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

/* Write completion for bios bounced through the ISA (GFP_DMA) pool. */
static void bounce_end_io_write_isa(struct bio *bio, int err)
{
	bounce_end_io(bio, isa_page_pool, err);
}

/*
 * Read completion: copy the bounced data back into the caller's pages
 * only if the read actually succeeded, then release the bounce pages.
 */
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

/* Read completion using the normal highmem bounce pool. */
static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

/* Read completion using the ISA (GFP_DMA) bounce pool. */
static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}

/*
 * Walk *@bio_orig and, for every segment whose page lies above the
 * queue's bounce pfn, allocate a replacement page from @pool and route
 * the I/O through it.  If no segment needed bouncing, *bio_orig is
 * left untouched; otherwise it is replaced by the bounce clone and the
 * original bio is completed from the clone's end_io handler.
 *
 * Fix: removed a stray whitespace-only line left after the clone
 * allocation block.
 */
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		/*
		 * irk, bounce it - allocate the clone lazily on the first
		 * segment that actually needs bouncing
		 */
		if (!bio) {
			unsigned int cnt = (*bio_orig)->bi_vcnt;

			bio = bio_alloc(GFP_NOIO, cnt);
			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
		}

		to = bio->bi_io_vec + i;

		/* NOTE(review): no NULL check - presumably q->bounce_gfp
		 * allows blocking so mempool_alloc() cannot fail here;
		 * confirm against the queue setup. */
		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			/* copy the payload into the bounce page now, before
			 * the device sees the bio */
			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	trace_block_bio_bounce(q, *bio_orig);

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}

	/* mirror the original bio's identity into the clone */
	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

	/* reads need the extra copy-back step on completion */
	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

/*
 * blk_queue_bounce - bounce out-of-reach pages of *@bio_orig
 *
 * Picks the right bounce pool for @q and hands off to
 * __blk_queue_bounce().  A no-op for data-less bios, and for non-ISA
 * queues whose bounce limit already covers every pfn in the system.
 */
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	if (q->bounce_gfp & GFP_DMA) {
		/* ISA bouncing: the pool must already exist - it is set up
		 * by init_emergency_isa_pool() when the limit is configured */
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	} else {
		/*
		 * for the non-isa case, skip the per-segment walk entirely
		 * when the bounce pfn is equal to or bigger than the highest
		 * pfn in the system - no page can need bouncing then
		 */
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);