about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r-- drivers/soc/qcom/smem.c 69
1 file changed, 61 insertions, 8 deletions
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index b451dbc4aa39..c28275be0038 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -52,7 +52,8 @@
52 * 52 *
53 * Items in the non-cached region are allocated from the start of the partition 53 * Items in the non-cached region are allocated from the start of the partition
54 * while items in the cached region are allocated from the end. The free area 54 * while items in the cached region are allocated from the end. The free area
55 * is hence the region between the cached and non-cached offsets. 55 * is hence the region between the cached and non-cached offsets. The header of
56 * cached items comes after the data.
56 * 57 *
57 * 58 *
58 * To synchronize allocations in the shared memory heaps a remote spinlock must 59 * To synchronize allocations in the shared memory heaps a remote spinlock must
@@ -140,6 +141,7 @@ struct smem_header {
140 * @flags: flags for the partition (currently unused) 141 * @flags: flags for the partition (currently unused)
141 * @host0: first processor/host with access to this partition 142 * @host0: first processor/host with access to this partition
142 * @host1: second processor/host with access to this partition 143 * @host1: second processor/host with access to this partition
144 * @cacheline: alignment for "cached" entries
143 * @reserved: reserved entries for later use 145 * @reserved: reserved entries for later use
144 */ 146 */
145struct smem_ptable_entry { 147struct smem_ptable_entry {
@@ -148,7 +150,8 @@ struct smem_ptable_entry {
148 __le32 flags; 150 __le32 flags;
149 __le16 host0; 151 __le16 host0;
150 __le16 host1; 152 __le16 host1;
151 __le32 reserved[8]; 153 __le32 cacheline;
154 __le32 reserved[7];
152}; 155};
153 156
154/** 157/**
@@ -230,6 +233,7 @@ struct smem_region {
230 * @hwlock: reference to a hwspinlock 233 * @hwlock: reference to a hwspinlock
231 * @partitions: list of pointers to partitions affecting the current 234 * @partitions: list of pointers to partitions affecting the current
232 * processor/host 235 * processor/host
236 * @cacheline: list of cacheline sizes for each host
233 * @num_regions: number of @regions 237 * @num_regions: number of @regions
234 * @regions: list of the memory regions defining the shared memory 238 * @regions: list of the memory regions defining the shared memory
235 */ 239 */
@@ -239,6 +243,7 @@ struct qcom_smem {
239 struct hwspinlock *hwlock; 243 struct hwspinlock *hwlock;
240 244
241 struct smem_partition_header *partitions[SMEM_HOST_COUNT]; 245 struct smem_partition_header *partitions[SMEM_HOST_COUNT];
246 size_t cacheline[SMEM_HOST_COUNT];
242 247
243 unsigned num_regions; 248 unsigned num_regions;
244 struct smem_region regions[0]; 249 struct smem_region regions[0];
@@ -252,6 +257,14 @@ phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
252 return p + le32_to_cpu(phdr->offset_free_uncached); 257 return p + le32_to_cpu(phdr->offset_free_uncached);
253} 258}
254 259
260static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
261 size_t cacheline)
262{
263 void *p = phdr;
264
265 return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
266}
267
255static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr) 268static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
256{ 269{
257 void *p = phdr; 270 void *p = phdr;
@@ -276,6 +289,14 @@ uncached_entry_next(struct smem_private_entry *e)
276 le32_to_cpu(e->size); 289 le32_to_cpu(e->size);
277} 290}
278 291
292static struct smem_private_entry *
293cached_entry_next(struct smem_private_entry *e, size_t cacheline)
294{
295 void *p = e;
296
297 return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
298}
299
279static void *uncached_entry_to_item(struct smem_private_entry *e) 300static void *uncached_entry_to_item(struct smem_private_entry *e)
280{ 301{
281 void *p = e; 302 void *p = e;
@@ -283,6 +304,13 @@ static void *uncached_entry_to_item(struct smem_private_entry *e)
283 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); 304 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
284} 305}
285 306
307static void *cached_entry_to_item(struct smem_private_entry *e)
308{
309 void *p = e;
310
311 return p - le32_to_cpu(e->size);
312}
313
286/* Pointer to the one and only smem handle */ 314/* Pointer to the one and only smem handle */
287static struct qcom_smem *__smem; 315static struct qcom_smem *__smem;
288 316
@@ -458,18 +486,17 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
458{ 486{
459 struct smem_partition_header *phdr; 487 struct smem_partition_header *phdr;
460 struct smem_private_entry *e, *end; 488 struct smem_private_entry *e, *end;
489 size_t cacheline;
461 490
462 phdr = smem->partitions[host]; 491 phdr = smem->partitions[host];
492 cacheline = smem->cacheline[host];
493
463 e = phdr_to_first_uncached_entry(phdr); 494 e = phdr_to_first_uncached_entry(phdr);
464 end = phdr_to_last_uncached_entry(phdr); 495 end = phdr_to_last_uncached_entry(phdr);
465 496
466 while (e < end) { 497 while (e < end) {
467 if (e->canary != SMEM_PRIVATE_CANARY) { 498 if (e->canary != SMEM_PRIVATE_CANARY)
468 dev_err(smem->dev, 499 goto invalid_canary;
469 "Found invalid canary in host %d partition\n",
470 host);
471 return ERR_PTR(-EINVAL);
472 }
473 500
474 if (le16_to_cpu(e->item) == item) { 501 if (le16_to_cpu(e->item) == item) {
475 if (size != NULL) 502 if (size != NULL)
@@ -482,7 +509,32 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
482 e = uncached_entry_next(e); 509 e = uncached_entry_next(e);
483 } 510 }
484 511
512 /* Item was not found in the uncached list, search the cached list */
513
514 e = phdr_to_first_cached_entry(phdr, cacheline);
515 end = phdr_to_last_cached_entry(phdr);
516
517 while (e > end) {
518 if (e->canary != SMEM_PRIVATE_CANARY)
519 goto invalid_canary;
520
521 if (le16_to_cpu(e->item) == item) {
522 if (size != NULL)
523 *size = le32_to_cpu(e->size) -
524 le16_to_cpu(e->padding_data);
525
526 return cached_entry_to_item(e);
527 }
528
529 e = cached_entry_next(e, cacheline);
530 }
531
485 return ERR_PTR(-ENOENT); 532 return ERR_PTR(-ENOENT);
533
534invalid_canary:
535 dev_err(smem->dev, "Found invalid canary in host %d partition\n", host);
536
537 return ERR_PTR(-EINVAL);
486} 538}
487 539
488/** 540/**
@@ -659,6 +711,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
659 } 711 }
660 712
661 smem->partitions[remote_host] = header; 713 smem->partitions[remote_host] = header;
714 smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
662 } 715 }
663 716
664 return 0; 717 return 0;