author		Peter Huewe <peterhuewe@gmx.de>	2015-03-14 19:54:43 -0400
committer	Peter Huewe <peterhuewe@gmx.de>	2015-03-18 17:43:07 -0400
commit		44506436d707ee89db4c7cf7b5bf07c9b8ae71b0 (patch)
tree		37d98b345158a396a89b6741141fe526c1422830
parent		92a2c6b26b72a65714e8433bb0ee2ad1866df5cf (diff)
tpm: Update KConfig text to include TPM2.0 FIFO chips
I got a lot of requests lately about whether the new TPM2.0 support
includes the FIFO interface for TPM2.0 as well. The FIFO interface is
handled by tpm_tis since FIFO=TIS (more or less).

-> Update the helptext and headline

Signed-off-by: Peter Huewe <peterhuewe@gmx.de>
-rw-r--r--	drivers/char/tpm/Kconfig	9
1 file changed, 5 insertions, 4 deletions
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 2dc16d3b2336..3b84a8b1bfbe 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -25,13 +25,14 @@ menuconfig TCG_TPM
 if TCG_TPM
 
 config TCG_TIS
-	tristate "TPM Interface Specification 1.2 Interface"
+	tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
 	depends on X86
 	---help---
 	  If you have a TPM security chip that is compliant with the
-	  TCG TIS 1.2 TPM specification say Yes and it will be accessible
-	  from within Linux. To compile this driver as a module, choose
-	  M here; the module will be called tpm_tis.
+	  TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
+	  specification (TPM2.0) say Yes and it will be accessible from
+	  within Linux. To compile this driver as a module, choose M here;
+	  the module will be called tpm_tis.
 
 config TCG_TIS_I2C_ATMEL
 	tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
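As the updated help text says, the driver can be built as a module named tpm_tis. Purely as an illustrative sketch (not part of this patch), the corresponding config fragment would look like:

# Illustrative only -- not part of the patch.
# Enable the TPM subsystem and the TIS/FIFO driver as modules:
CONFIG_TCG_TPM=m
CONFIG_TCG_TIS=m
# After the modules are installed, the driver is loaded with:
#   modprobe tpm_tis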
 * creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data, int node_id)
{
	mempool_t *pool;

	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      GFP_KERNEL, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool: pointer to the memory pool which was allocated via
 *	mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *	allocated for this pool.
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *	mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (ie:
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	/* Check for outstanding elements */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *	mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule(). The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
		io_schedule_timeout(5*HZ);
	}
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);

/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool: pointer to the memory pool which was allocated via
 *	mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	smp_mb();
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t) pool_data;
	return kzalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kzalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
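The kerneldoc comments above describe the intended usage pattern: preallocate a reserve with mempool_create(), draw from it with mempool_alloc(), and hand elements back with mempool_free(), which refills the reserve before calling the free_fn(). A minimal sketch of that pattern using the slab helpers defined above follows; the struct, cache name, and reserve size are made-up placeholders, not part of mempool.c.

/* Illustrative sketch only: my_obj, my_obj_cache, my_pool and
 * MY_MIN_RESERVE are hypothetical names. Error handling is abbreviated. */
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/gfp.h>

#define MY_MIN_RESERVE	16	/* elements guaranteed even under memory pressure */

struct my_obj {
	int id;
	char payload[120];
};

static struct kmem_cache *my_obj_cache;
static mempool_t *my_pool;

static int my_pool_init(void)
{
	my_obj_cache = kmem_cache_create("my_obj_cache",
					 sizeof(struct my_obj), 0, 0, NULL);
	if (!my_obj_cache)
		return -ENOMEM;

	/* mempool_alloc_slab()/mempool_free_slab() (defined above) take the
	 * kmem_cache as pool_data and wrap kmem_cache_alloc()/free(). */
	my_pool = mempool_create(MY_MIN_RESERVE, mempool_alloc_slab,
				 mempool_free_slab, my_obj_cache);
	if (!my_pool) {
		kmem_cache_destroy(my_obj_cache);
		return -ENOMEM;
	}
	return 0;
}

static void my_pool_use(void)
{
	/* With a sleeping mask such as GFP_KERNEL this does not fail: per the
	 * comment on mempool_alloc(), it falls back to the preallocated
	 * reserve and waits for an element to be returned if necessary. */
	struct my_obj *obj = mempool_alloc(my_pool, GFP_KERNEL);

	/* ... use obj ... */

	mempool_free(obj, my_pool);	/* tops up the reserve, else free_fn() */
}

static void my_pool_exit(void)
{
	/* All elements must have been returned before destroying the pool. */
	mempool_destroy(my_pool);
	kmem_cache_destroy(my_obj_cache);
}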