 drivers/mtd/spi-nor/spi-nor.c | 130 ++++++++++++++++++++++++++++++++----------
 1 file changed, 98 insertions(+), 32 deletions(-)
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 3e54e31889c7..93c9bc8931fc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
  * @nor:	pointer to a 'struct spi_nor'
  * @addr:	offset in the serial flash memory
  * @len:	number of bytes to read
- * @buf:	buffer where the data is copied into
+ * @buf:	buffer where the data is copied into (dma-safe memory)
  *
  * Return: 0 on success, -errno otherwise.
  */
@@ -2522,6 +2522,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
 }
 
 /**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map:	the erase map of the SPI NOR
+ * @erase_mask: the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+	struct spi_nor_erase_type *erase_type = map->erase_type;
+	int i;
+	u8 sorted_erase_mask = 0;
+
+	if (!erase_mask)
+		return 0;
+
+	/* Replicate the sort done for the map's erase types. */
+	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+			sorted_erase_mask |= BIT(i);
+
+	return sorted_erase_mask;
+}
+
+/**
  * spi_nor_regions_sort_erase_types() - sort erase types in each region
  * @map:	the erase map of the SPI NOR
  *
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
 static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
 {
 	struct spi_nor_erase_region *region = map->regions;
-	struct spi_nor_erase_type *erase_type = map->erase_type;
-	int i;
 	u8 region_erase_mask, sorted_erase_mask;
 
 	while (region) {
 		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
 
-		/* Replicate the sort done for the map's erase types. */
-		sorted_erase_mask = 0;
-		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
-			if (erase_type[i].size &&
-			    region_erase_mask & BIT(erase_type[i].idx))
-				sorted_erase_mask |= BIT(i);
+		sorted_erase_mask = spi_nor_sort_erase_mask(map,
+							    region_erase_mask);
 
 		/* Overwrite erase mask. */
 		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
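
The new helper only reorders bits, mapping a mask expressed in the erase types' original indexes onto the size-sorted slot order. A minimal userspace sketch may make that mapping easier to see; the struct, the constant and the table contents below are simplified stand-ins with illustrative values, not the kernel definitions.

	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)			(1u << (n))
	#define SNOR_ERASE_TYPE_MAX	4

	/* Simplified stand-in for the kernel's struct spi_nor_erase_type. */
	struct erase_type {
		uint32_t size;	/* erase size in bytes, 0 if the slot is unused */
		uint8_t idx;	/* index the erase type had before the BFPT sort */
	};

	/* Slots already sorted by size (slot 0 = smallest), illustrative values. */
	static const struct erase_type erase_type[SNOR_ERASE_TYPE_MAX] = {
		{ .size = 4096,  .idx = 1 },
		{ .size = 32768, .idx = 2 },
		{ .size = 65536, .idx = 0 },
		{ .size = 0,     .idx = 3 },	/* unused slot */
	};

	/* Same walk as spi_nor_sort_erase_mask(): BIT(i) follows the size order. */
	static uint8_t sort_erase_mask(uint8_t erase_mask)
	{
		uint8_t sorted = 0;
		int i;

		if (!erase_mask)
			return 0;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
			if (erase_type[i].size && (erase_mask & BIT(erase_type[i].idx)))
				sorted |= BIT(i);

		return sorted;
	}

	int main(void)
	{
		/* Mask in pre-sort bit order: bits 0 and 2 set (64 KiB and 32 KiB here). */
		unsigned int in = BIT(0) | BIT(2);

		/* Prints in=0x05 sorted=0x06: the same types, now in size order. */
		printf("in=0x%02x sorted=0x%02x\n", in, (unsigned int)sort_erase_mask(in));
		return 0;
	}
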
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
  * spi_nor_get_map_in_use() - get the configuration map in use
  * @nor:	pointer to a 'struct spi_nor'
  * @smpt:	pointer to the sector map parameter table
+ * @smpt_len:	sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
  */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt)
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+					 u8 smpt_len)
 {
-	const u32 *ret = NULL;
-	u32 i, addr;
+	const u32 *ret;
+	u8 *buf;
+	u32 addr;
 	int err;
+	u8 i;
 	u8 addr_width, read_opcode, read_dummy;
-	u8 read_data_mask, data_byte, map_id;
+	u8 read_data_mask, map_id;
+
+	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
 
 	addr_width = nor->addr_width;
 	read_dummy = nor->read_dummy;
 	read_opcode = nor->read_opcode;
 
 	map_id = 0;
-	i = 0;
 	/* Determine if there are any optional Detection Command Descriptors */
-	while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) {
+	for (i = 0; i < smpt_len; i += 2) {
+		if (smpt[i] & SMPT_DESC_TYPE_MAP)
+			break;
+
 		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
 		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
 		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
 		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
 		addr = smpt[i + 1];
 
-		err = spi_nor_read_raw(nor, addr, 1, &data_byte);
-		if (err)
+		err = spi_nor_read_raw(nor, addr, 1, buf);
+		if (err) {
+			ret = ERR_PTR(err);
 			goto out;
+		}
 
 		/*
 		 * Build an index value that is used to select the Sector Map
 		 * Configuration that is currently in use.
 		 */
-		map_id = map_id << 1 | !!(data_byte & read_data_mask);
-		i = i + 2;
+		map_id = map_id << 1 | !!(*buf & read_data_mask);
 	}
 
-	/* Find the matching configuration map */
-	while (SMPT_MAP_ID(smpt[i]) != map_id) {
+	/*
+	 * If command descriptors are provided, they always precede map
+	 * descriptors in the table. There is no need to start the iteration
+	 * over smpt array all over again.
+	 *
+	 * Find the matching configuration map.
+	 */
+	ret = ERR_PTR(-EINVAL);
+	while (i < smpt_len) {
+		if (SMPT_MAP_ID(smpt[i]) == map_id) {
+			ret = smpt + i;
+			break;
+		}
+
+		/*
+		 * If there are no more configuration map descriptors and no
+		 * configuration ID matched the configuration identifier, the
+		 * sector address map is unknown.
+		 */
 		if (smpt[i] & SMPT_DESC_END)
-			goto out;
+			break;
+
 		/* increment the table index to the next map */
 		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
 	}
 
-	ret = smpt + i;
 	/* fall through */
 out:
+	kfree(buf);
 	nor->addr_width = addr_width;
 	nor->read_dummy = read_dummy;
 	nor->read_opcode = read_opcode;
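
Besides the bounds checking against smpt_len, the key change above is reading through a kmalloc'ed single-byte bounce buffer instead of a stack variable: the SPI controller behind spi_nor_read_raw() may DMA into the buffer, and stack addresses are not guaranteed to be DMA-able. A minimal sketch of the same idiom in isolation, as it might look inside spi-nor.c; the helper name is made up, spi_nor_read_raw() is the helper already present in this file.

	/* Hypothetical helper, shown only to isolate the bounce-buffer idiom. */
	static int read_one_byte_dma_safe(struct spi_nor *nor, u32 addr, u8 *out)
	{
		u8 *buf;
		int err;

		/* kmalloc'ed memory is DMA-able; 'out' may live on the caller's stack. */
		buf = kmalloc(1, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (!err)
			*out = *buf;

		kfree(buf);
		return err;
	}
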
@@ -2946,7 +3000,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
 	u64 offset;
 	u32 region_count;
 	int i, j;
-	u8 erase_type;
+	u8 erase_type, uniform_erase_type;
 
 	region_count = SMPT_MAP_REGION_COUNT(*smpt);
 	/*
@@ -2959,7 +3013,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
 		return -ENOMEM;
 	map->regions = region;
 
-	map->uniform_erase_type = 0xff;
+	uniform_erase_type = 0xff;
 	offset = 0;
 	/* Populate regions. */
 	for (i = 0; i < region_count; i++) {
@@ -2974,12 +3028,15 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
 		 * Save the erase types that are supported in all regions and
 		 * can erase the entire flash memory.
 		 */
-		map->uniform_erase_type &= erase_type;
+		uniform_erase_type &= erase_type;
 
 		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
 			 region[i].size;
 	}
 
+	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+							   uniform_erase_type);
+
 	spi_nor_region_mark_end(&region[i - 1]);
 
 	return 0;
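
The uniform erase type is built by AND-ing every region's mask and only then re-sorting it with the new helper, so that it ends up in the same bit order as the region masks. A tiny userspace arithmetic check of the accumulation step, with made-up masks:

	#include <stdint.h>
	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		/* Per-region erase masks in pre-sort bit order, made-up values. */
		uint8_t region_masks[] = { 0x05, 0x04, 0x07 };
		uint8_t uniform = 0xff;
		size_t i;

		for (i = 0; i < sizeof(region_masks); i++)
			uniform &= region_masks[i];

		/* 0x05 & 0x04 & 0x07 = 0x04: only the erase type every region supports. */
		printf("uniform erase mask: 0x%02x\n", (unsigned int)uniform);
		return 0;
	}
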
@@ -3020,9 +3077,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
 	for (i = 0; i < smpt_header->length; i++)
 		smpt[i] = le32_to_cpu(smpt[i]);
 
-	sector_map = spi_nor_get_map_in_use(nor, smpt);
-	if (!sector_map) {
-		ret = -EINVAL;
+	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+	if (IS_ERR(sector_map)) {
+		ret = PTR_ERR(sector_map);
 		goto out;
 	}
 
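
The caller now follows the usual ERR_PTR convention, so the precise errno (for example -ENOMEM from the bounce buffer allocation) is no longer collapsed into -EINVAL. A hypothetical sketch of that convention with made-up names; only ERR_PTR(), IS_ERR() and PTR_ERR() are real kernel API.

	#include <linux/err.h>
	#include <linux/types.h>

	/* Made-up lookup: returns a pointer into 'table' or an encoded errno. */
	static const u32 *demo_find_entry(const u32 *table, u8 len, u8 wanted_id)
	{
		u8 i;

		for (i = 0; i < len; i++)
			if ((table[i] & 0xff) == wanted_id)	/* illustrative match */
				return &table[i];

		return ERR_PTR(-EINVAL);	/* nothing matched */
	}

	static int demo_use_entry(const u32 *table, u8 len, u8 wanted_id)
	{
		const u32 *entry = demo_find_entry(table, len, wanted_id);

		if (IS_ERR(entry))
			return PTR_ERR(entry);	/* propagate the precise errno */

		/* ... consume *entry ... */
		return 0;
	}
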
@@ -3125,7 +3182,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
 	if (err)
 		goto exit;
 
-	/* Parse other parameter headers. */
+	/* Parse optional parameter tables. */
 	for (i = 0; i < header.nph; i++) {
 		param_header = &param_headers[i];
 
@@ -3138,8 +3195,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
 			break;
 		}
 
-		if (err)
-			goto exit;
+		if (err) {
+			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+				 SFDP_PARAM_HEADER_ID(param_header));
+			/*
+			 * Let's not drop all information we extracted so far
+			 * if optional table parsers fail. In case of failing,
+			 * each optional parser is responsible to roll back to
+			 * the previously known spi_nor data.
+			 */
+			err = 0;
+		}
 	}
 
 exit:
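
The new comment states a contract rather than code: because SFDP parsing now warns and continues, each optional table parser must leave the previously known settings intact when it fails. A hypothetical sketch of one way to honour that contract; the parser body and the choice of nor->erase_map as the protected state are assumptions for illustration.

	static int demo_parse_body(struct spi_nor *nor);	/* placeholder */

	static int demo_parse_optional_table(struct spi_nor *nor)
	{
		/* Snapshot the state this parser is about to overwrite. */
		struct spi_nor_erase_map backup = nor->erase_map;
		int err;

		err = demo_parse_body(nor);
		if (err) {
			/*
			 * Roll back so the SFDP loop can warn and keep going; a
			 * real parser must also free anything it allocated first.
			 */
			nor->erase_map = backup;
		}

		return err;
	}
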