about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorStephen Boyd <sboyd@codeaurora.org>2015-09-02 18:46:45 -0400
committerAndy Gross <agross@codeaurora.org>2015-10-14 15:51:20 -0400
commit9806884d8cd552e6926c162a022cc4b948f4abc8 (patch)
tree375b728da01197c50183643109a8128bdbb1b798
parent1a03964dec3cecb6382d172b9dfe318735c2cad7 (diff)
soc: qcom: smem: Handle big endian CPUs
The contents of smem are always in little endian, but the smem driver is not capable of being used on big endian CPUs. Annotate the little endian data members and update the code to do the proper byte swapping. Cc: Bjorn Andersson <bjorn.andersson@sonymobile.com> Signed-off-by: Stephen Boyd <sboyd@codeaurora.org> Reviewed-by: Bjorn Andersson <bjorn.andersson@sonymobile.com> Signed-off-by: Andy Gross <agross@codeaurora.org>
-rw-r--r--drivers/soc/qcom/smem.c229
1 files changed, 138 insertions, 91 deletions
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index e6d0dae63845..74017114ce6e 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -92,9 +92,9 @@
92 * @params: parameters to the command 92 * @params: parameters to the command
93 */ 93 */
94struct smem_proc_comm { 94struct smem_proc_comm {
95 u32 command; 95 __le32 command;
96 u32 status; 96 __le32 status;
97 u32 params[2]; 97 __le32 params[2];
98}; 98};
99 99
100/** 100/**
@@ -106,10 +106,10 @@ struct smem_proc_comm {
106 * the default region. bits 0,1 are reserved 106 * the default region. bits 0,1 are reserved
107 */ 107 */
108struct smem_global_entry { 108struct smem_global_entry {
109 u32 allocated; 109 __le32 allocated;
110 u32 offset; 110 __le32 offset;
111 u32 size; 111 __le32 size;
112 u32 aux_base; /* bits 1:0 reserved */ 112 __le32 aux_base; /* bits 1:0 reserved */
113}; 113};
114#define AUX_BASE_MASK 0xfffffffc 114#define AUX_BASE_MASK 0xfffffffc
115 115
@@ -125,11 +125,11 @@ struct smem_global_entry {
125 */ 125 */
126struct smem_header { 126struct smem_header {
127 struct smem_proc_comm proc_comm[4]; 127 struct smem_proc_comm proc_comm[4];
128 u32 version[32]; 128 __le32 version[32];
129 u32 initialized; 129 __le32 initialized;
130 u32 free_offset; 130 __le32 free_offset;
131 u32 available; 131 __le32 available;
132 u32 reserved; 132 __le32 reserved;
133 struct smem_global_entry toc[SMEM_ITEM_COUNT]; 133 struct smem_global_entry toc[SMEM_ITEM_COUNT];
134}; 134};
135 135
@@ -143,12 +143,12 @@ struct smem_header {
143 * @reserved: reserved entries for later use 143 * @reserved: reserved entries for later use
144 */ 144 */
145struct smem_ptable_entry { 145struct smem_ptable_entry {
146 u32 offset; 146 __le32 offset;
147 u32 size; 147 __le32 size;
148 u32 flags; 148 __le32 flags;
149 u16 host0; 149 __le16 host0;
150 u16 host1; 150 __le16 host1;
151 u32 reserved[8]; 151 __le32 reserved[8];
152}; 152};
153 153
154/** 154/**
@@ -160,13 +160,14 @@ struct smem_ptable_entry {
160 * @entry: list of @smem_ptable_entry for the @num_entries partitions 160 * @entry: list of @smem_ptable_entry for the @num_entries partitions
161 */ 161 */
162struct smem_ptable { 162struct smem_ptable {
163 u32 magic; 163 u8 magic[4];
164 u32 version; 164 __le32 version;
165 u32 num_entries; 165 __le32 num_entries;
166 u32 reserved[5]; 166 __le32 reserved[5];
167 struct smem_ptable_entry entry[]; 167 struct smem_ptable_entry entry[];
168}; 168};
169#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */ 169
170static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
170 171
171/** 172/**
172 * struct smem_partition_header - header of the partitions 173 * struct smem_partition_header - header of the partitions
@@ -181,15 +182,16 @@ struct smem_ptable {
181 * @reserved: for now reserved entries 182 * @reserved: for now reserved entries
182 */ 183 */
183struct smem_partition_header { 184struct smem_partition_header {
184 u32 magic; 185 u8 magic[4];
185 u16 host0; 186 __le16 host0;
186 u16 host1; 187 __le16 host1;
187 u32 size; 188 __le32 size;
188 u32 offset_free_uncached; 189 __le32 offset_free_uncached;
189 u32 offset_free_cached; 190 __le32 offset_free_cached;
190 u32 reserved[3]; 191 __le32 reserved[3];
191}; 192};
192#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */ 193
194static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
193 195
194/** 196/**
195 * struct smem_private_entry - header of each item in the private partition 197 * struct smem_private_entry - header of each item in the private partition
@@ -201,12 +203,12 @@ struct smem_partition_header {
201 * @reserved: for now reserved entry 203 * @reserved: for now reserved entry
202 */ 204 */
203struct smem_private_entry { 205struct smem_private_entry {
204 u16 canary; 206 u16 canary; /* bytes are the same so no swapping needed */
205 u16 item; 207 __le16 item;
206 u32 size; /* includes padding bytes */ 208 __le32 size; /* includes padding bytes */
207 u16 padding_data; 209 __le16 padding_data;
208 u16 padding_hdr; 210 __le16 padding_hdr;
209 u32 reserved; 211 __le32 reserved;
210}; 212};
211#define SMEM_PRIVATE_CANARY 0xa5a5 213#define SMEM_PRIVATE_CANARY 0xa5a5
212 214
@@ -242,6 +244,45 @@ struct qcom_smem {
242 struct smem_region regions[0]; 244 struct smem_region regions[0];
243}; 245};
244 246
247static struct smem_private_entry *
248phdr_to_last_private_entry(struct smem_partition_header *phdr)
249{
250 void *p = phdr;
251
252 return p + le32_to_cpu(phdr->offset_free_uncached);
253}
254
255static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
256{
257 void *p = phdr;
258
259 return p + le32_to_cpu(phdr->offset_free_cached);
260}
261
262static struct smem_private_entry *
263phdr_to_first_private_entry(struct smem_partition_header *phdr)
264{
265 void *p = phdr;
266
267 return p + sizeof(*phdr);
268}
269
270static struct smem_private_entry *
271private_entry_next(struct smem_private_entry *e)
272{
273 void *p = e;
274
275 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
276 le32_to_cpu(e->size);
277}
278
279static void *entry_to_item(struct smem_private_entry *e)
280{
281 void *p = e;
282
283 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
284}
285
245/* Pointer to the one and only smem handle */ 286/* Pointer to the one and only smem handle */
246static struct qcom_smem *__smem; 287static struct qcom_smem *__smem;
247 288
@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
254 size_t size) 295 size_t size)
255{ 296{
256 struct smem_partition_header *phdr; 297 struct smem_partition_header *phdr;
257 struct smem_private_entry *hdr; 298 struct smem_private_entry *hdr, *end;
258 size_t alloc_size; 299 size_t alloc_size;
259 void *p; 300 void *cached;
260 301
261 phdr = smem->partitions[host]; 302 phdr = smem->partitions[host];
303 hdr = phdr_to_first_private_entry(phdr);
304 end = phdr_to_last_private_entry(phdr);
305 cached = phdr_to_first_cached_entry(phdr);
262 306
263 p = (void *)phdr + sizeof(*phdr); 307 while (hdr < end) {
264 while (p < (void *)phdr + phdr->offset_free_uncached) {
265 hdr = p;
266
267 if (hdr->canary != SMEM_PRIVATE_CANARY) { 308 if (hdr->canary != SMEM_PRIVATE_CANARY) {
268 dev_err(smem->dev, 309 dev_err(smem->dev,
269 "Found invalid canary in host %d partition\n", 310 "Found invalid canary in host %d partition\n",
@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
271 return -EINVAL; 312 return -EINVAL;
272 } 313 }
273 314
274 if (hdr->item == item) 315 if (le16_to_cpu(hdr->item) == item)
275 return -EEXIST; 316 return -EEXIST;
276 317
277 p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 318 hdr = private_entry_next(hdr);
278 } 319 }
279 320
280 /* Check that we don't grow into the cached region */ 321 /* Check that we don't grow into the cached region */
281 alloc_size = sizeof(*hdr) + ALIGN(size, 8); 322 alloc_size = sizeof(*hdr) + ALIGN(size, 8);
282 if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) { 323 if ((void *)hdr + alloc_size >= cached) {
283 dev_err(smem->dev, "Out of memory\n"); 324 dev_err(smem->dev, "Out of memory\n");
284 return -ENOSPC; 325 return -ENOSPC;
285 } 326 }
286 327
287 hdr = p;
288 hdr->canary = SMEM_PRIVATE_CANARY; 328 hdr->canary = SMEM_PRIVATE_CANARY;
289 hdr->item = item; 329 hdr->item = cpu_to_le16(item);
290 hdr->size = ALIGN(size, 8); 330 hdr->size = cpu_to_le32(ALIGN(size, 8));
291 hdr->padding_data = hdr->size - size; 331 hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
292 hdr->padding_hdr = 0; 332 hdr->padding_hdr = 0;
293 333
294 /* 334 /*
@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
297 * gets a consistent view of the linked list. 337 * gets a consistent view of the linked list.
298 */ 338 */
299 wmb(); 339 wmb();
300 phdr->offset_free_uncached += alloc_size; 340 le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
301 341
302 return 0; 342 return 0;
303} 343}
@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
318 return -EEXIST; 358 return -EEXIST;
319 359
320 size = ALIGN(size, 8); 360 size = ALIGN(size, 8);
321 if (WARN_ON(size > header->available)) 361 if (WARN_ON(size > le32_to_cpu(header->available)))
322 return -ENOMEM; 362 return -ENOMEM;
323 363
324 entry->offset = header->free_offset; 364 entry->offset = header->free_offset;
325 entry->size = size; 365 entry->size = cpu_to_le32(size);
326 366
327 /* 367 /*
328 * Ensure the header is consistent before we mark the item allocated, 368 * Ensure the header is consistent before we mark the item allocated,
@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
330 * even though they do not take the spinlock on read. 370 * even though they do not take the spinlock on read.
331 */ 371 */
332 wmb(); 372 wmb();
333 entry->allocated = 1; 373 entry->allocated = cpu_to_le32(1);
334 374
335 header->free_offset += size; 375 le32_add_cpu(&header->free_offset, size);
336 header->available -= size; 376 le32_add_cpu(&header->available, -size);
337 377
338 return 0; 378 return 0;
339} 379}
@@ -396,15 +436,15 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
396 if (!entry->allocated) 436 if (!entry->allocated)
397 return ERR_PTR(-ENXIO); 437 return ERR_PTR(-ENXIO);
398 438
399 aux_base = entry->aux_base & AUX_BASE_MASK; 439 aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
400 440
401 for (i = 0; i < smem->num_regions; i++) { 441 for (i = 0; i < smem->num_regions; i++) {
402 area = &smem->regions[i]; 442 area = &smem->regions[i];
403 443
404 if (area->aux_base == aux_base || !aux_base) { 444 if (area->aux_base == aux_base || !aux_base) {
405 if (size != NULL) 445 if (size != NULL)
406 *size = entry->size; 446 *size = le32_to_cpu(entry->size);
407 return area->virt_base + entry->offset; 447 return area->virt_base + le32_to_cpu(entry->offset);
408 } 448 }
409 } 449 }
410 450
@@ -417,30 +457,29 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
417 size_t *size) 457 size_t *size)
418{ 458{
419 struct smem_partition_header *phdr; 459 struct smem_partition_header *phdr;
420 struct smem_private_entry *hdr; 460 struct smem_private_entry *e, *end;
421 void *p;
422 461
423 phdr = smem->partitions[host]; 462 phdr = smem->partitions[host];
463 e = phdr_to_first_private_entry(phdr);
464 end = phdr_to_last_private_entry(phdr);
424 465
425 p = (void *)phdr + sizeof(*phdr); 466 while (e < end) {
426 while (p < (void *)phdr + phdr->offset_free_uncached) { 467 if (e->canary != SMEM_PRIVATE_CANARY) {
427 hdr = p;
428
429 if (hdr->canary != SMEM_PRIVATE_CANARY) {
430 dev_err(smem->dev, 468 dev_err(smem->dev,
431 "Found invalid canary in host %d partition\n", 469 "Found invalid canary in host %d partition\n",
432 host); 470 host);
433 return ERR_PTR(-EINVAL); 471 return ERR_PTR(-EINVAL);
434 } 472 }
435 473
436 if (hdr->item == item) { 474 if (le16_to_cpu(e->item) == item) {
437 if (size != NULL) 475 if (size != NULL)
438 *size = hdr->size - hdr->padding_data; 476 *size = le32_to_cpu(e->size) -
477 le16_to_cpu(e->padding_data);
439 478
440 return p + sizeof(*hdr) + hdr->padding_hdr; 479 return entry_to_item(e);
441 } 480 }
442 481
443 p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 482 e = private_entry_next(e);
444 } 483 }
445 484
446 return ERR_PTR(-ENOENT); 485 return ERR_PTR(-ENOENT);
@@ -500,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host)
500 539
501 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { 540 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
502 phdr = __smem->partitions[host]; 541 phdr = __smem->partitions[host];
503 ret = phdr->offset_free_cached - phdr->offset_free_uncached; 542 ret = le32_to_cpu(phdr->offset_free_cached) -
543 le32_to_cpu(phdr->offset_free_uncached);
504 } else { 544 } else {
505 header = __smem->regions[0].virt_base; 545 header = __smem->regions[0].virt_base;
506 ret = header->available; 546 ret = le32_to_cpu(header->available);
507 } 547 }
508 548
509 return ret; 549 return ret;
@@ -512,7 +552,7 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
512 552
513static int qcom_smem_get_sbl_version(struct qcom_smem *smem) 553static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
514{ 554{
515 unsigned *versions; 555 __le32 *versions;
516 size_t size; 556 size_t size;
517 557
518 versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size); 558 versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
@@ -526,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
526 return -EINVAL; 566 return -EINVAL;
527 } 567 }
528 568
529 return versions[SMEM_MASTER_SBL_VERSION_INDEX]; 569 return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
530} 570}
531 571
532static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, 572static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
@@ -536,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
536 struct smem_ptable_entry *entry; 576 struct smem_ptable_entry *entry;
537 struct smem_ptable *ptable; 577 struct smem_ptable *ptable;
538 unsigned remote_host; 578 unsigned remote_host;
579 u32 version, host0, host1;
539 int i; 580 int i;
540 581
541 ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; 582 ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
542 if (ptable->magic != SMEM_PTABLE_MAGIC) 583 if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
543 return 0; 584 return 0;
544 585
545 if (ptable->version != 1) { 586 version = le32_to_cpu(ptable->version);
587 if (version != 1) {
546 dev_err(smem->dev, 588 dev_err(smem->dev,
547 "Unsupported partition header version %d\n", 589 "Unsupported partition header version %d\n", version);
548 ptable->version);
549 return -EINVAL; 590 return -EINVAL;
550 } 591 }
551 592
552 for (i = 0; i < ptable->num_entries; i++) { 593 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
553 entry = &ptable->entry[i]; 594 entry = &ptable->entry[i];
595 host0 = le16_to_cpu(entry->host0);
596 host1 = le16_to_cpu(entry->host1);
554 597
555 if (entry->host0 != local_host && entry->host1 != local_host) 598 if (host0 != local_host && host1 != local_host)
556 continue; 599 continue;
557 600
558 if (!entry->offset) 601 if (!le32_to_cpu(entry->offset))
559 continue; 602 continue;
560 603
561 if (!entry->size) 604 if (!le32_to_cpu(entry->size))
562 continue; 605 continue;
563 606
564 if (entry->host0 == local_host) 607 if (host0 == local_host)
565 remote_host = entry->host1; 608 remote_host = host1;
566 else 609 else
567 remote_host = entry->host0; 610 remote_host = host0;
568 611
569 if (remote_host >= SMEM_HOST_COUNT) { 612 if (remote_host >= SMEM_HOST_COUNT) {
570 dev_err(smem->dev, 613 dev_err(smem->dev,
@@ -580,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
580 return -EINVAL; 623 return -EINVAL;
581 } 624 }
582 625
583 header = smem->regions[0].virt_base + entry->offset; 626 header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
627 host0 = le16_to_cpu(header->host0);
628 host1 = le16_to_cpu(header->host1);
584 629
585 if (header->magic != SMEM_PART_MAGIC) { 630 if (memcmp(header->magic, SMEM_PART_MAGIC,
631 sizeof(header->magic))) {
586 dev_err(smem->dev, 632 dev_err(smem->dev,
587 "Partition %d has invalid magic\n", i); 633 "Partition %d has invalid magic\n", i);
588 return -EINVAL; 634 return -EINVAL;
589 } 635 }
590 636
591 if (header->host0 != local_host && header->host1 != local_host) { 637 if (host0 != local_host && host1 != local_host) {
592 dev_err(smem->dev, 638 dev_err(smem->dev,
593 "Partition %d hosts are invalid\n", i); 639 "Partition %d hosts are invalid\n", i);
594 return -EINVAL; 640 return -EINVAL;
595 } 641 }
596 642
597 if (header->host0 != remote_host && header->host1 != remote_host) { 643 if (host0 != remote_host && host1 != remote_host) {
598 dev_err(smem->dev, 644 dev_err(smem->dev,
599 "Partition %d hosts are invalid\n", i); 645 "Partition %d hosts are invalid\n", i);
600 return -EINVAL; 646 return -EINVAL;
@@ -606,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
606 return -EINVAL; 652 return -EINVAL;
607 } 653 }
608 654
609 if (header->offset_free_uncached > header->size) { 655 if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
610 dev_err(smem->dev, 656 dev_err(smem->dev,
611 "Partition %d has invalid free pointer\n", i); 657 "Partition %d has invalid free pointer\n", i);
612 return -EINVAL; 658 return -EINVAL;
@@ -690,7 +736,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
690 } 736 }
691 737
692 header = smem->regions[0].virt_base; 738 header = smem->regions[0].virt_base;
693 if (header->initialized != 1 || header->reserved) { 739 if (le32_to_cpu(header->initialized) != 1 ||
740 le32_to_cpu(header->reserved)) {
694 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); 741 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
695 return -EINVAL; 742 return -EINVAL;
696 } 743 }