diff options
| author | Len Brown <len.brown@intel.com> | 2019-05-13 13:59:00 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2019-05-23 04:08:36 -0400 |
| commit | b2ce1c883df91a231f8138935167273c1767ad66 (patch) | |
| tree | 558b6e49c32c90592911559f1001b80cc1657066 | |
| parent | cb63ba0f670df1f0ddf21c6cc4bbe74db398742c (diff) | |
thermal/x86_pkg_temp_thermal: Cosmetic: Rename internal variables to zones from packages
Syntax update only -- no logical or functional change.
In response to the new multi-die/package changes, update variable names to
use the more generic thermal "zone" terminology, instead of "package", as
the zones can refer to either packages or die.
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Zhang Rui <rui.zhang@intel.com>
Link: https://lkml.kernel.org/r/b65494a76be13481dc3a809c75debb2574c34eda.1557769318.git.len.brown@intel.com
| -rw-r--r-- | drivers/thermal/intel/x86_pkg_temp_thermal.c | 142 |
1 file changed, 72 insertions, 70 deletions
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 405b3858900a..87e929ffb0cb 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c | |||
| @@ -55,7 +55,7 @@ MODULE_PARM_DESC(notify_delay_ms, | |||
| 55 | */ | 55 | */ |
| 56 | #define MAX_NUMBER_OF_TRIPS 2 | 56 | #define MAX_NUMBER_OF_TRIPS 2 |
| 57 | 57 | ||
| 58 | struct pkg_device { | 58 | struct zone_device { |
| 59 | int cpu; | 59 | int cpu; |
| 60 | bool work_scheduled; | 60 | bool work_scheduled; |
| 61 | u32 tj_max; | 61 | u32 tj_max; |
| @@ -70,10 +70,10 @@ static struct thermal_zone_params pkg_temp_tz_params = { | |||
| 70 | .no_hwmon = true, | 70 | .no_hwmon = true, |
| 71 | }; | 71 | }; |
| 72 | 72 | ||
| 73 | /* Keep track of how many package pointers we allocated in init() */ | 73 | /* Keep track of how many zone pointers we allocated in init() */ |
| 74 | static int max_packages __read_mostly; | 74 | static int max_id __read_mostly; |
| 75 | /* Array of package pointers */ | 75 | /* Array of zone pointers */ |
| 76 | static struct pkg_device **packages; | 76 | static struct zone_device **zones; |
| 77 | /* Serializes interrupt notification, work and hotplug */ | 77 | /* Serializes interrupt notification, work and hotplug */ |
| 78 | static DEFINE_SPINLOCK(pkg_temp_lock); | 78 | static DEFINE_SPINLOCK(pkg_temp_lock); |
| 79 | /* Protects zone operation in the work function against hotplug removal */ | 79 | /* Protects zone operation in the work function against hotplug removal */ |
| @@ -120,12 +120,12 @@ err_out: | |||
| 120 | * | 120 | * |
| 121 | * - Other callsites: Must hold pkg_temp_lock | 121 | * - Other callsites: Must hold pkg_temp_lock |
| 122 | */ | 122 | */ |
| 123 | static struct pkg_device *pkg_temp_thermal_get_dev(unsigned int cpu) | 123 | static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu) |
| 124 | { | 124 | { |
| 125 | int pkgid = topology_logical_die_id(cpu); | 125 | int id = topology_logical_die_id(cpu); |
| 126 | 126 | ||
| 127 | if (pkgid >= 0 && pkgid < max_packages) | 127 | if (id >= 0 && id < max_id) |
| 128 | return packages[pkgid]; | 128 | return zones[id]; |
| 129 | return NULL; | 129 | return NULL; |
| 130 | } | 130 | } |
| 131 | 131 | ||
| @@ -150,12 +150,13 @@ static int get_tj_max(int cpu, u32 *tj_max) | |||
| 150 | 150 | ||
| 151 | static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) | 151 | static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) |
| 152 | { | 152 | { |
| 153 | struct pkg_device *pkgdev = tzd->devdata; | 153 | struct zone_device *zonedev = tzd->devdata; |
| 154 | u32 eax, edx; | 154 | u32 eax, edx; |
| 155 | 155 | ||
| 156 | rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, &eax, &edx); | 156 | rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS, |
| 157 | &eax, &edx); | ||
| 157 | if (eax & 0x80000000) { | 158 | if (eax & 0x80000000) { |
| 158 | *temp = pkgdev->tj_max - ((eax >> 16) & 0x7f) * 1000; | 159 | *temp = zonedev->tj_max - ((eax >> 16) & 0x7f) * 1000; |
| 159 | pr_debug("sys_get_curr_temp %d\n", *temp); | 160 | pr_debug("sys_get_curr_temp %d\n", *temp); |
| 160 | return 0; | 161 | return 0; |
| 161 | } | 162 | } |
| @@ -165,7 +166,7 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp) | |||
| 165 | static int sys_get_trip_temp(struct thermal_zone_device *tzd, | 166 | static int sys_get_trip_temp(struct thermal_zone_device *tzd, |
| 166 | int trip, int *temp) | 167 | int trip, int *temp) |
| 167 | { | 168 | { |
| 168 | struct pkg_device *pkgdev = tzd->devdata; | 169 | struct zone_device *zonedev = tzd->devdata; |
| 169 | unsigned long thres_reg_value; | 170 | unsigned long thres_reg_value; |
| 170 | u32 mask, shift, eax, edx; | 171 | u32 mask, shift, eax, edx; |
| 171 | int ret; | 172 | int ret; |
| @@ -181,14 +182,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, | |||
| 181 | shift = THERM_SHIFT_THRESHOLD0; | 182 | shift = THERM_SHIFT_THRESHOLD0; |
| 182 | } | 183 | } |
| 183 | 184 | ||
| 184 | ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, | 185 | ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, |
| 185 | &eax, &edx); | 186 | &eax, &edx); |
| 186 | if (ret < 0) | 187 | if (ret < 0) |
| 187 | return ret; | 188 | return ret; |
| 188 | 189 | ||
| 189 | thres_reg_value = (eax & mask) >> shift; | 190 | thres_reg_value = (eax & mask) >> shift; |
| 190 | if (thres_reg_value) | 191 | if (thres_reg_value) |
| 191 | *temp = pkgdev->tj_max - thres_reg_value * 1000; | 192 | *temp = zonedev->tj_max - thres_reg_value * 1000; |
| 192 | else | 193 | else |
| 193 | *temp = 0; | 194 | *temp = 0; |
| 194 | pr_debug("sys_get_trip_temp %d\n", *temp); | 195 | pr_debug("sys_get_trip_temp %d\n", *temp); |
| @@ -199,14 +200,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, | |||
| 199 | static int | 200 | static int |
| 200 | sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) | 201 | sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) |
| 201 | { | 202 | { |
| 202 | struct pkg_device *pkgdev = tzd->devdata; | 203 | struct zone_device *zonedev = tzd->devdata; |
| 203 | u32 l, h, mask, shift, intr; | 204 | u32 l, h, mask, shift, intr; |
| 204 | int ret; | 205 | int ret; |
| 205 | 206 | ||
| 206 | if (trip >= MAX_NUMBER_OF_TRIPS || temp >= pkgdev->tj_max) | 207 | if (trip >= MAX_NUMBER_OF_TRIPS || temp >= zonedev->tj_max) |
| 207 | return -EINVAL; | 208 | return -EINVAL; |
| 208 | 209 | ||
| 209 | ret = rdmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, | 210 | ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, |
| 210 | &l, &h); | 211 | &l, &h); |
| 211 | if (ret < 0) | 212 | if (ret < 0) |
| 212 | return ret; | 213 | return ret; |
| @@ -228,11 +229,12 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp) | |||
| 228 | if (!temp) { | 229 | if (!temp) { |
| 229 | l &= ~intr; | 230 | l &= ~intr; |
| 230 | } else { | 231 | } else { |
| 231 | l |= (pkgdev->tj_max - temp)/1000 << shift; | 232 | l |= (zonedev->tj_max - temp)/1000 << shift; |
| 232 | l |= intr; | 233 | l |= intr; |
| 233 | } | 234 | } |
| 234 | 235 | ||
| 235 | return wrmsr_on_cpu(pkgdev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); | 236 | return wrmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, |
| 237 | l, h); | ||
| 236 | } | 238 | } |
| 237 | 239 | ||
| 238 | static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip, | 240 | static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip, |
| @@ -287,26 +289,26 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) | |||
| 287 | { | 289 | { |
| 288 | struct thermal_zone_device *tzone = NULL; | 290 | struct thermal_zone_device *tzone = NULL; |
| 289 | int cpu = smp_processor_id(); | 291 | int cpu = smp_processor_id(); |
| 290 | struct pkg_device *pkgdev; | 292 | struct zone_device *zonedev; |
| 291 | u64 msr_val, wr_val; | 293 | u64 msr_val, wr_val; |
| 292 | 294 | ||
| 293 | mutex_lock(&thermal_zone_mutex); | 295 | mutex_lock(&thermal_zone_mutex); |
| 294 | spin_lock_irq(&pkg_temp_lock); | 296 | spin_lock_irq(&pkg_temp_lock); |
| 295 | ++pkg_work_cnt; | 297 | ++pkg_work_cnt; |
| 296 | 298 | ||
| 297 | pkgdev = pkg_temp_thermal_get_dev(cpu); | 299 | zonedev = pkg_temp_thermal_get_dev(cpu); |
| 298 | if (!pkgdev) { | 300 | if (!zonedev) { |
| 299 | spin_unlock_irq(&pkg_temp_lock); | 301 | spin_unlock_irq(&pkg_temp_lock); |
| 300 | mutex_unlock(&thermal_zone_mutex); | 302 | mutex_unlock(&thermal_zone_mutex); |
| 301 | return; | 303 | return; |
| 302 | } | 304 | } |
| 303 | pkgdev->work_scheduled = false; | 305 | zonedev->work_scheduled = false; |
| 304 | 306 | ||
| 305 | rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); | 307 | rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); |
| 306 | wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1); | 308 | wr_val = msr_val & ~(THERM_LOG_THRESHOLD0 | THERM_LOG_THRESHOLD1); |
| 307 | if (wr_val != msr_val) { | 309 | if (wr_val != msr_val) { |
| 308 | wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val); | 310 | wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, wr_val); |
| 309 | tzone = pkgdev->tzone; | 311 | tzone = zonedev->tzone; |
| 310 | } | 312 | } |
| 311 | 313 | ||
| 312 | enable_pkg_thres_interrupt(); | 314 | enable_pkg_thres_interrupt(); |
| @@ -332,7 +334,7 @@ static void pkg_thermal_schedule_work(int cpu, struct delayed_work *work) | |||
| 332 | static int pkg_thermal_notify(u64 msr_val) | 334 | static int pkg_thermal_notify(u64 msr_val) |
| 333 | { | 335 | { |
| 334 | int cpu = smp_processor_id(); | 336 | int cpu = smp_processor_id(); |
| 335 | struct pkg_device *pkgdev; | 337 | struct zone_device *zonedev; |
| 336 | unsigned long flags; | 338 | unsigned long flags; |
| 337 | 339 | ||
| 338 | spin_lock_irqsave(&pkg_temp_lock, flags); | 340 | spin_lock_irqsave(&pkg_temp_lock, flags); |
| @@ -341,10 +343,10 @@ static int pkg_thermal_notify(u64 msr_val) | |||
| 341 | disable_pkg_thres_interrupt(); | 343 | disable_pkg_thres_interrupt(); |
| 342 | 344 | ||
| 343 | /* Work is per package, so scheduling it once is enough. */ | 345 | /* Work is per package, so scheduling it once is enough. */ |
| 344 | pkgdev = pkg_temp_thermal_get_dev(cpu); | 346 | zonedev = pkg_temp_thermal_get_dev(cpu); |
| 345 | if (pkgdev && !pkgdev->work_scheduled) { | 347 | if (zonedev && !zonedev->work_scheduled) { |
| 346 | pkgdev->work_scheduled = true; | 348 | zonedev->work_scheduled = true; |
| 347 | pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work); | 349 | pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work); |
| 348 | } | 350 | } |
| 349 | 351 | ||
| 350 | spin_unlock_irqrestore(&pkg_temp_lock, flags); | 352 | spin_unlock_irqrestore(&pkg_temp_lock, flags); |
| @@ -353,12 +355,12 @@ static int pkg_thermal_notify(u64 msr_val) | |||
| 353 | 355 | ||
| 354 | static int pkg_temp_thermal_device_add(unsigned int cpu) | 356 | static int pkg_temp_thermal_device_add(unsigned int cpu) |
| 355 | { | 357 | { |
| 356 | int pkgid = topology_logical_die_id(cpu); | 358 | int id = topology_logical_die_id(cpu); |
| 357 | u32 tj_max, eax, ebx, ecx, edx; | 359 | u32 tj_max, eax, ebx, ecx, edx; |
| 358 | struct pkg_device *pkgdev; | 360 | struct zone_device *zonedev; |
| 359 | int thres_count, err; | 361 | int thres_count, err; |
| 360 | 362 | ||
| 361 | if (pkgid >= max_packages) | 363 | if (id >= max_id) |
| 362 | return -ENOMEM; | 364 | return -ENOMEM; |
| 363 | 365 | ||
| 364 | cpuid(6, &eax, &ebx, &ecx, &edx); | 366 | cpuid(6, &eax, &ebx, &ecx, &edx); |
| @@ -372,51 +374,51 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) | |||
| 372 | if (err) | 374 | if (err) |
| 373 | return err; | 375 | return err; |
| 374 | 376 | ||
| 375 | pkgdev = kzalloc(sizeof(*pkgdev), GFP_KERNEL); | 377 | zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL); |
| 376 | if (!pkgdev) | 378 | if (!zonedev) |
| 377 | return -ENOMEM; | 379 | return -ENOMEM; |
| 378 | 380 | ||
| 379 | INIT_DELAYED_WORK(&pkgdev->work, pkg_temp_thermal_threshold_work_fn); | 381 | INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn); |
| 380 | pkgdev->cpu = cpu; | 382 | zonedev->cpu = cpu; |
| 381 | pkgdev->tj_max = tj_max; | 383 | zonedev->tj_max = tj_max; |
| 382 | pkgdev->tzone = thermal_zone_device_register("x86_pkg_temp", | 384 | zonedev->tzone = thermal_zone_device_register("x86_pkg_temp", |
| 383 | thres_count, | 385 | thres_count, |
| 384 | (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01, | 386 | (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01, |
| 385 | pkgdev, &tzone_ops, &pkg_temp_tz_params, 0, 0); | 387 | zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0); |
| 386 | if (IS_ERR(pkgdev->tzone)) { | 388 | if (IS_ERR(zonedev->tzone)) { |
| 387 | err = PTR_ERR(pkgdev->tzone); | 389 | err = PTR_ERR(zonedev->tzone); |
| 388 | kfree(pkgdev); | 390 | kfree(zonedev); |
| 389 | return err; | 391 | return err; |
| 390 | } | 392 | } |
| 391 | /* Store MSR value for package thermal interrupt, to restore at exit */ | 393 | /* Store MSR value for package thermal interrupt, to restore at exit */ |
| 392 | rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, pkgdev->msr_pkg_therm_low, | 394 | rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low, |
| 393 | pkgdev->msr_pkg_therm_high); | 395 | zonedev->msr_pkg_therm_high); |
| 394 | 396 | ||
| 395 | cpumask_set_cpu(cpu, &pkgdev->cpumask); | 397 | cpumask_set_cpu(cpu, &zonedev->cpumask); |
| 396 | spin_lock_irq(&pkg_temp_lock); | 398 | spin_lock_irq(&pkg_temp_lock); |
| 397 | packages[pkgid] = pkgdev; | 399 | zones[id] = zonedev; |
| 398 | spin_unlock_irq(&pkg_temp_lock); | 400 | spin_unlock_irq(&pkg_temp_lock); |
| 399 | return 0; | 401 | return 0; |
| 400 | } | 402 | } |
| 401 | 403 | ||
| 402 | static int pkg_thermal_cpu_offline(unsigned int cpu) | 404 | static int pkg_thermal_cpu_offline(unsigned int cpu) |
| 403 | { | 405 | { |
| 404 | struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu); | 406 | struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu); |
| 405 | bool lastcpu, was_target; | 407 | bool lastcpu, was_target; |
| 406 | int target; | 408 | int target; |
| 407 | 409 | ||
| 408 | if (!pkgdev) | 410 | if (!zonedev) |
| 409 | return 0; | 411 | return 0; |
| 410 | 412 | ||
| 411 | target = cpumask_any_but(&pkgdev->cpumask, cpu); | 413 | target = cpumask_any_but(&zonedev->cpumask, cpu); |
| 412 | cpumask_clear_cpu(cpu, &pkgdev->cpumask); | 414 | cpumask_clear_cpu(cpu, &zonedev->cpumask); |
| 413 | lastcpu = target >= nr_cpu_ids; | 415 | lastcpu = target >= nr_cpu_ids; |
| 414 | /* | 416 | /* |
| 415 | * Remove the sysfs files, if this is the last cpu in the package | 417 | * Remove the sysfs files, if this is the last cpu in the package |
| 416 | * before doing further cleanups. | 418 | * before doing further cleanups. |
| 417 | */ | 419 | */ |
| 418 | if (lastcpu) { | 420 | if (lastcpu) { |
| 419 | struct thermal_zone_device *tzone = pkgdev->tzone; | 421 | struct thermal_zone_device *tzone = zonedev->tzone; |
| 420 | 422 | ||
| 421 | /* | 423 | /* |
| 422 | * We must protect against a work function calling | 424 | * We must protect against a work function calling |
| @@ -425,7 +427,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) | |||
| 425 | * won't try to call. | 427 | * won't try to call. |
| 426 | */ | 428 | */ |
| 427 | mutex_lock(&thermal_zone_mutex); | 429 | mutex_lock(&thermal_zone_mutex); |
| 428 | pkgdev->tzone = NULL; | 430 | zonedev->tzone = NULL; |
| 429 | mutex_unlock(&thermal_zone_mutex); | 431 | mutex_unlock(&thermal_zone_mutex); |
| 430 | 432 | ||
| 431 | thermal_zone_device_unregister(tzone); | 433 | thermal_zone_device_unregister(tzone); |
| @@ -439,8 +441,8 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) | |||
| 439 | * one. When we drop the lock, then the interrupt notify function | 441 | * one. When we drop the lock, then the interrupt notify function |
| 440 | * will see the new target. | 442 | * will see the new target. |
| 441 | */ | 443 | */ |
| 442 | was_target = pkgdev->cpu == cpu; | 444 | was_target = zonedev->cpu == cpu; |
| 443 | pkgdev->cpu = target; | 445 | zonedev->cpu = target; |
| 444 | 446 | ||
| 445 | /* | 447 | /* |
| 446 | * If this is the last CPU in the package remove the package | 448 | * If this is the last CPU in the package remove the package |
| @@ -449,23 +451,23 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) | |||
| 449 | * worker will see the package anymore. | 451 | * worker will see the package anymore. |
| 450 | */ | 452 | */ |
| 451 | if (lastcpu) { | 453 | if (lastcpu) { |
| 452 | packages[topology_logical_die_id(cpu)] = NULL; | 454 | zones[topology_logical_die_id(cpu)] = NULL; |
| 453 | /* After this point nothing touches the MSR anymore. */ | 455 | /* After this point nothing touches the MSR anymore. */ |
| 454 | wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, | 456 | wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, |
| 455 | pkgdev->msr_pkg_therm_low, pkgdev->msr_pkg_therm_high); | 457 | zonedev->msr_pkg_therm_low, zonedev->msr_pkg_therm_high); |
| 456 | } | 458 | } |
| 457 | 459 | ||
| 458 | /* | 460 | /* |
| 459 | * Check whether there is work scheduled and whether the work is | 461 | * Check whether there is work scheduled and whether the work is |
| 460 | * targeted at the outgoing CPU. | 462 | * targeted at the outgoing CPU. |
| 461 | */ | 463 | */ |
| 462 | if (pkgdev->work_scheduled && was_target) { | 464 | if (zonedev->work_scheduled && was_target) { |
| 463 | /* | 465 | /* |
| 464 | * To cancel the work we need to drop the lock, otherwise | 466 | * To cancel the work we need to drop the lock, otherwise |
| 465 | * we might deadlock if the work needs to be flushed. | 467 | * we might deadlock if the work needs to be flushed. |
| 466 | */ | 468 | */ |
| 467 | spin_unlock_irq(&pkg_temp_lock); | 469 | spin_unlock_irq(&pkg_temp_lock); |
| 468 | cancel_delayed_work_sync(&pkgdev->work); | 470 | cancel_delayed_work_sync(&zonedev->work); |
| 469 | spin_lock_irq(&pkg_temp_lock); | 471 | spin_lock_irq(&pkg_temp_lock); |
| 470 | /* | 472 | /* |
| 471 | * If this is not the last cpu in the package and the work | 473 | * If this is not the last cpu in the package and the work |
| @@ -473,21 +475,21 @@ static int pkg_thermal_cpu_offline(unsigned int cpu) | |||
| 473 | * need to reschedule the work, otherwise the interrupt | 475 | * need to reschedule the work, otherwise the interrupt |
| 474 | * stays disabled forever. | 476 | * stays disabled forever. |
| 475 | */ | 477 | */ |
| 476 | if (!lastcpu && pkgdev->work_scheduled) | 478 | if (!lastcpu && zonedev->work_scheduled) |
| 477 | pkg_thermal_schedule_work(target, &pkgdev->work); | 479 | pkg_thermal_schedule_work(target, &zonedev->work); |
| 478 | } | 480 | } |
| 479 | 481 | ||
| 480 | spin_unlock_irq(&pkg_temp_lock); | 482 | spin_unlock_irq(&pkg_temp_lock); |
| 481 | 483 | ||
| 482 | /* Final cleanup if this is the last cpu */ | 484 | /* Final cleanup if this is the last cpu */ |
| 483 | if (lastcpu) | 485 | if (lastcpu) |
| 484 | kfree(pkgdev); | 486 | kfree(zonedev); |
| 485 | return 0; | 487 | return 0; |
| 486 | } | 488 | } |
| 487 | 489 | ||
| 488 | static int pkg_thermal_cpu_online(unsigned int cpu) | 490 | static int pkg_thermal_cpu_online(unsigned int cpu) |
| 489 | { | 491 | { |
| 490 | struct pkg_device *pkgdev = pkg_temp_thermal_get_dev(cpu); | 492 | struct zone_device *zonedev = pkg_temp_thermal_get_dev(cpu); |
| 491 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 493 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 492 | 494 | ||
| 493 | /* Paranoia check */ | 495 | /* Paranoia check */ |
| @@ -495,8 +497,8 @@ static int pkg_thermal_cpu_online(unsigned int cpu) | |||
| 495 | return -ENODEV; | 497 | return -ENODEV; |
| 496 | 498 | ||
| 497 | /* If the package exists, nothing to do */ | 499 | /* If the package exists, nothing to do */ |
| 498 | if (pkgdev) { | 500 | if (zonedev) { |
| 499 | cpumask_set_cpu(cpu, &pkgdev->cpumask); | 501 | cpumask_set_cpu(cpu, &zonedev->cpumask); |
| 500 | return 0; | 502 | return 0; |
| 501 | } | 503 | } |
| 502 | return pkg_temp_thermal_device_add(cpu); | 504 | return pkg_temp_thermal_device_add(cpu); |
| @@ -515,10 +517,10 @@ static int __init pkg_temp_thermal_init(void) | |||
| 515 | if (!x86_match_cpu(pkg_temp_thermal_ids)) | 517 | if (!x86_match_cpu(pkg_temp_thermal_ids)) |
| 516 | return -ENODEV; | 518 | return -ENODEV; |
| 517 | 519 | ||
| 518 | max_packages = topology_max_packages() * topology_max_die_per_package(); | 520 | max_id = topology_max_packages() * topology_max_die_per_package(); |
| 519 | packages = kcalloc(max_packages, sizeof(struct pkg_device *), | 521 | zones = kcalloc(max_id, sizeof(struct zone_device *), |
| 520 | GFP_KERNEL); | 522 | GFP_KERNEL); |
| 521 | if (!packages) | 523 | if (!zones) |
| 522 | return -ENOMEM; | 524 | return -ENOMEM; |
| 523 | 525 | ||
| 524 | ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online", | 526 | ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online", |
| @@ -537,7 +539,7 @@ static int __init pkg_temp_thermal_init(void) | |||
| 537 | return 0; | 539 | return 0; |
| 538 | 540 | ||
| 539 | err: | 541 | err: |
| 540 | kfree(packages); | 542 | kfree(zones); |
| 541 | return ret; | 543 | return ret; |
| 542 | } | 544 | } |
| 543 | module_init(pkg_temp_thermal_init) | 545 | module_init(pkg_temp_thermal_init) |
| @@ -549,7 +551,7 @@ static void __exit pkg_temp_thermal_exit(void) | |||
| 549 | 551 | ||
| 550 | cpuhp_remove_state(pkg_thermal_hp_state); | 552 | cpuhp_remove_state(pkg_thermal_hp_state); |
| 551 | debugfs_remove_recursive(debugfs); | 553 | debugfs_remove_recursive(debugfs); |
| 552 | kfree(packages); | 554 | kfree(zones); |
| 553 | } | 555 | } |
| 554 | module_exit(pkg_temp_thermal_exit) | 556 | module_exit(pkg_temp_thermal_exit) |
| 555 | 557 | ||
