path: root/arch/x86/boot/memory.c
Commit message / Author / Age
* Merge branch 'x86/urgent' into x86/setup (H. Peter Anvin, 2009-05-23)
    Resolved conflicts: arch/x86/boot/memory.c

    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
* x86, setup: revert ACPI 3 E820 extended attributes support (H. Peter Anvin, 2009-05-22)
    Remove ACPI 3 E820 extended memory attributes support.  At least one
    vendor actively set all the flags to zero, but left ECX on return
    at 24.  This bug may be present in other BIOSes.

    The breakage functionally means the ACPI 3 flags are probably
    completely useless, and that no OS any time soon is going to rely on
    their existence.  Therefore, drop support completely.  We may want to
    revisit this question in the future, if we find ourselves actually
    needing the flags.

    This reverts all or part of the following checkins:

        cd670599b7b00d9263f6f11a05c0edeb9cbedaf3
        c549e71d073a6e9a4847497344db28a784061455

    However, retain the part from the latter commit that copies e820 into
    a temporary buffer; that is an unrelated BIOS workaround.  Put in a
    comment to explain that part.

    See https://bugzilla.redhat.com/show_bug.cgi?id=499396 for some
    additional information.

    [ Impact: detect all memory on affected machines ]

    Reported-by: Thomas J. Baker <tjb@unh.edu>
    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
    Acked-by: Len Brown <len.brown@intel.com>
    Cc: Chuck Ebbert <cebbert@redhat.com>
    Cc: Kyle McMartin <kmcmartin@redhat.com>
    Cc: Matt Domsch <matt_domsch@dell.com>
* x86, setup: "glove box" BIOS interrupts in the core boot code (H. Peter Anvin, 2009-04-09)
    Impact: BIOS proofing

    "Glove box" off BIOS interrupts in the core boot code.

    LKML-Reference: <49DE7F79.4030106@zytor.com>
    Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
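    A minimal sketch of the resulting calling pattern for E820, assuming
    the boot code's struct biosregs / initregs() / intcall() helpers,
    which save and restore all registers around the BIOS interrupt:

        struct biosregs ireg, oreg;

        initregs(&ireg);                /* zeroed regs, sane segments */
        ireg.ax  = 0xe820;
        ireg.cx  = sizeof(buf);         /* buf: an e820 entry buffer */
        ireg.edx = SMAP;                /* 0x534d4150, "SMAP" signature */
        ireg.di  = (size_t)&buf;
        intcall(0x15, &ireg, &oreg);    /* registers restored afterwards */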
* x86, setup: guard against pre-ACPI 3 e820 code not updating %ecx (H. Peter Anvin, 2009-04-01)
    Impact: BIOS bug safety

    For pre-ACPI 3 BIOSes, pre-initialize the end of the e820 buffer just
    in case the BIOS returns an unchanged %ecx but without actually
    touching the ACPI 3 extended flags field.

    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
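    A sketch of the guard (field names approximate; the buffer is a
    legacy e820 entry followed by the ACPI 3 extended-flags word):

        static struct e820_ext_entry buf;   /* std entry + u32 ext_flags */

        /*
         * Preset the valid bit: if the BIOS returns %ecx unchanged but
         * never writes the extended flags, the entry is still accepted.
         */
        buf.ext_flags = 1;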
* x86, setup: ACPI 3, BIOS workaround for E820-probing code (H. Peter Anvin, 2009-03-28)
    Impact: ACPI 3 spec compliance, BIOS bug workaround

    The ACPI 3 spec added another field to the E820 buffer -- which is
    backwards incompatible, since it contains a validity bit.
    Furthermore, there has been at least one report of a BIOS which
    assumes that the buffer it is pointed at is the same buffer as for
    the previous E820 call.  Therefore, read the data into a temporary
    buffer and copy the standard part of it if and only if the valid bit
    is set.

    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
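    The copy-if-valid step, roughly (20 bytes of legacy entry plus a
    4-byte extended-flags word; names approximate):

        /* ACPI 3 entries carry a validity bit; skip invalid ones */
        if (oreg.cx > 20 && !(buf.ext_flags & 1))
                continue;

        *desc++ = buf.std;      /* copy only the standard 20-byte part */
        count++;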
* x86, setup: preemptively save/restore edi and ebp around INT 15 E820 (H. Peter Anvin, 2009-03-28)
    Impact: BIOS bugproofing

    Since there are BIOSes known to clobber %ebx and %esi for INT 15 E820,
    assume there is something out there clobbering %edi and/or %ebp too,
    and don't wait for it to fail.

    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
* x86, setup: mark %esi as clobbered in E820 BIOS call (Michael K. Johnson, 2009-03-28)
    Jordan Hargrave diagnosed a BIOS clobbering %esi in the E820 call.
    That particular BIOS has been fixed, but there is a possibility that
    this is responsible for other occasional reports of early boot
    failure, and it does not hurt to add %esi to the clobbers.

    -stable candidate patch.

    Cc: Justin Forbes <jmforbes@linuxtx.org>
    Signed-off-by: Michael K Johnson <johnsonm@rpath.com>
    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
    Cc: stable@kernel.org
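    In the pre-"glove box" inline assembly, this pair of defensive fixes
    amounts to telling the compiler about every register a buggy BIOS
    might trash, roughly (constraints abridged):

        asm("pushl %%ebp ; int $0x15 ; popl %%ebp" /* save %ebp by hand */
            : "+a" (id), "+c" (size), "+b" (next), "+D" (desc)
            : "d" (SMAP)
            : "esi", "memory");     /* %esi: known to be clobbered */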
* x86: fix build warnings in real mode code (Andi Kleen, 2008-08-18)
    This recent patch

        commit c3965bd15118742d72b4bc1a290d37b3f081eb98
        Author: Paul Jackson <pj@sgi.com>
        Date:   Wed May 14 08:15:34 2008 -0700

            x86 boot: proper use of ARRAY_SIZE instead of repeated E820MAX constant

    caused these new warnings during a normal build:

        In file included from linux-2.6/arch/x86/boot/memory.c:17:
        linux-2.6/include/linux/log2.h: In function '__ilog2_u32':
        linux-2.6/include/linux/log2.h:34: warning: implicit declaration of function 'fls'
        linux-2.6/include/linux/log2.h: In function '__ilog2_u64':
        linux-2.6/include/linux/log2.h:42: warning: implicit declaration of function 'fls64'
        linux-2.6/include/linux/log2.h: In function '__roundup_pow_of_two':
        linux-2.6/include/linux/log2.h:63: warning: implicit declaration of function 'fls_long'

    I tried to fix them in log2.h, but it's difficult because the real
    mode environment is completely different from a normal kernel
    environment.  Instead define our own ARRAY_SIZE macro in boot.h,
    similar to the other private macros there.

    Signed-off-by: Andi Kleen <ak@linux.intel.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
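    The private macro added to boot.h is the usual self-contained
    definition, needing no kernel headers:

        #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))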
* x86 boot: proper use of ARRAY_SIZE instead of repeated E820MAX constant (Paul Jackson, 2008-05-25)
    This patch is motivated by a subsequent patch which will allow for
    more memory map entries on EFI supported systems than can be passed
    via the x86 legacy BIOS E820 interface.  The legacy interface is
    limited to E820MAX == 128 memory entries, and that "E820MAX" manifest
    constant was used as the size for several arrays and loops over those
    arrays.

    The primary change in this patch is to change code loop sizes over
    those arrays from using the constant E820MAX to using the ARRAY_SIZE()
    macro evaluated for the array being looped.  That way, a subsequent
    patch can change the size of some of these arrays without breaking
    this code.

    This patch also adds a parameter to the sanitize_e820_map() routine,
    which had an implicit size of E820MAX entries for the array passed to
    it.  This new parameter explicitly passes the size of said array.
    Once again, this will allow a subsequent patch to change that array
    size for some calls to sanitize_e820_map() without breaking the code.

    As part of enhancing the sanitize_e820_map() interface this way, I
    further combined the unnecessarily distinct x86_32 and x86_64
    declarations for this routine into a single, commonly used,
    declaration.

    This patch in itself should make no difference to the resulting
    kernel binary.

    [ mingo@elte.hu: merged to -tip ]

    Signed-off-by: Paul Jackson <pj@sgi.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
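    The shape of the change (array and field names illustrative):

        /* before: loop bound hard-coded to the BIOS limit */
        for (i = 0; i < E820MAX; i++)
                /* ... */;

        /* after: bound derived from the array actually being walked */
        for (i = 0; i < ARRAY_SIZE(e820.map); i++)
                /* ... */;

        /* sanitize_e820_map() now takes the array size explicitly */
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);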
* x86: remove pointless comments (WANG Cong, 2008-04-19)
    Remove old comments that reference the old arch/i386 directory.

    Signed-off-by: WANG Cong <xiyou.wangcong@gmail.com>
    Acked-by: H. Peter Anvin <hpa@zytor.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* x86: handle BIOSes which terminate e820 with CF=1 and no SMAP (H. Peter Anvin, 2008-02-26)
    The proper way to terminate the e820 chain is with %ebx == 0 on the
    last legitimate memory block.  However, several BIOSes don't do that
    and instead return error (CF = 1) when trying to read off the end of
    the list.  For this error return, %eax doesn't necessarily return the
    SMAP signature -- correctly so, since %ah should contain an error
    code in this case.

    To deal with some particularly broken BIOSes, we clear the entire
    e820 chain if the SMAP signature is missing in the middle, indicating
    a plain insane e820 implementation.  However, we need to make the
    test for CF = 1 before the SMAP check.

    This fixes at least one HP laptop (nc6400) for which none of the
    memory-probing methods (e820, e801, 88) functioned fully according
    to spec.

    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
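    In pseudo-C (written with the boot code's later intcall() helper for
    brevity), the probing loop orders the tests like this:

        do {
                intcall(0x15, &ireg, &oreg);
                ireg.ebx = oreg.ebx;    /* continuation for next call */

                /*
                 * CF=1 termination must be tested before the SMAP
                 * check: %eax then holds an error code, not "SMAP".
                 */
                if (oreg.eflags & X86_EFLAGS_CF)
                        break;

                /* SMAP missing mid-list: insane e820, discard it all */
                if (oreg.eax != SMAP) {
                        count = 0;
                        break;
                }

                count++;
        } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map));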
* i386: move boot (Thomas Gleixner, 2007-10-11)
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
path: root/drivers/iommu/tegra-gart.c

/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
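/*
 * Masks off both the in-page offset bits and the "valid" bit (bit 31),
 * so (pte & GART_PAGE_MASK) yields the physical page address.
 */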
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

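/*
 * The GART page table is accessed indirectly through an address/data
 * register pair: write an iova to GART_ENTRY_ADDR, then read or write
 * the corresponding PTE through GART_ENTRY_DATA.
 */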
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		 pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}

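/*
 * Program every PTE (from a saved snapshot, or to zero when data is
 * NULL) and enable translation by writing GART_CONFIG; used at probe
 * time and on resume.
 */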
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart;
	struct gart_client *client, *c;
	int err = 0;

	gart = gart_handle;
	if (!gart)
		return -EINVAL;
	domain->priv = gart;

	domain->geometry.aperture_start = gart->iovmm_base;
	domain->geometry.aperture_end   = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	domain->geometry.force_aperture = true;

	client = devm_kzalloc(gart->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = domain->priv;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}

static int gart_iommu_domain_init(struct iommu_domain *domain)
{
	return 0;
}

static void gart_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct gart_device *gart = domain->priv;

	if (!gart)
		return;

	spin_lock(&gart->client_lock);
	if (!list_empty(&gart->client)) {
		struct gart_client *c;

		list_for_each_entry(c, &gart->client, list)
			gart_iommu_detach_dev(domain, c->dev);
	}
	spin_unlock(&gart->client_lock);
	domain->priv = NULL;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = domain->priv;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			 (unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static int gart_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops gart_iommu_ops = {
	.domain_init	= gart_iommu_domain_init,
	.domain_destroy	= gart_iommu_domain_destroy,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.domain_has_cap	= gart_iommu_domain_has_cap,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};
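
/*
 * Illustrative only (not part of the original driver): with these ops
 * registered on the platform bus, a client driver maps memory through
 * the generic IOMMU API, e.g.:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(dom, dev);
 *	iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);
 */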

static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;
	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
	return 0;
}

static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);

	writel(0, gart->regs + GART_CONFIG);
	vfree(gart->savedata);	/* vfree() tolerates NULL */
	gart_handle = NULL;
	return 0;
}

static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table = tegra_gart_of_match,
	},
};

static int __init tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");