path: root/drivers/hwmon/vt8231.c
Diffstat (limited to 'drivers/hwmon/vt8231.c')
-rw-r--r-- drivers/hwmon/vt8231.c | 317
1 file changed, 173 insertions(+), 144 deletions(-)
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index a6a4aa0eee16..320d8141be78 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -29,8 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/jiffies.h> 31#include <linux/jiffies.h>
32#include <linux/i2c.h> 32#include <linux/platform_device.h>
33#include <linux/i2c-isa.h>
34#include <linux/hwmon.h> 33#include <linux/hwmon.h>
35#include <linux/hwmon-sysfs.h> 34#include <linux/hwmon-sysfs.h>
36#include <linux/hwmon-vid.h> 35#include <linux/hwmon-vid.h>
@@ -42,10 +41,7 @@ static int force_addr;
42module_param(force_addr, int, 0); 41module_param(force_addr, int, 0);
43MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors"); 42MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors");
44 43
45/* Device address 44static struct platform_device *pdev;
46 Note that we can't determine the ISA address until we have initialized
47 our module */
48static unsigned short isa_address;
49 45
50#define VT8231_EXTENT 0x80 46#define VT8231_EXTENT 0x80
51#define VT8231_BASE_REG 0x70 47#define VT8231_BASE_REG 0x70
@@ -148,7 +144,9 @@ static inline u8 FAN_TO_REG(long rpm, int div)
148#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div))) 144#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
149 145
150struct vt8231_data { 146struct vt8231_data {
151 struct i2c_client client; 147 unsigned short addr;
148 const char *name;
149
152 struct mutex update_lock; 150 struct mutex update_lock;
153 struct class_device *class_dev; 151 struct class_device *class_dev;
154 char valid; /* !=0 if following fields are valid */ 152 char valid; /* !=0 if following fields are valid */
@@ -168,20 +166,20 @@ struct vt8231_data {
168}; 166};
169 167
170static struct pci_dev *s_bridge; 168static struct pci_dev *s_bridge;
171static int vt8231_detect(struct i2c_adapter *adapter); 169static int vt8231_probe(struct platform_device *pdev);
172static int vt8231_detach_client(struct i2c_client *client); 170static int vt8231_remove(struct platform_device *pdev);
173static struct vt8231_data *vt8231_update_device(struct device *dev); 171static struct vt8231_data *vt8231_update_device(struct device *dev);
174static void vt8231_init_client(struct i2c_client *client); 172static void vt8231_init_device(struct vt8231_data *data);
175 173
176static inline int vt8231_read_value(struct i2c_client *client, u8 reg) 174static inline int vt8231_read_value(struct vt8231_data *data, u8 reg)
177{ 175{
178 return inb_p(client->addr + reg); 176 return inb_p(data->addr + reg);
179} 177}
180 178
181static inline void vt8231_write_value(struct i2c_client *client, u8 reg, 179static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
182 u8 value) 180 u8 value)
183{ 181{
184 outb_p(value, client->addr + reg); 182 outb_p(value, data->addr + reg);
185} 183}
186 184
187/* following are the sysfs callback functions */ 185/* following are the sysfs callback functions */
@@ -220,13 +218,12 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
220{ 218{
221 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); 219 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
222 int nr = sensor_attr->index; 220 int nr = sensor_attr->index;
223 struct i2c_client *client = to_i2c_client(dev); 221 struct vt8231_data *data = dev_get_drvdata(dev);
224 struct vt8231_data *data = i2c_get_clientdata(client);
225 unsigned long val = simple_strtoul(buf, NULL, 10); 222 unsigned long val = simple_strtoul(buf, NULL, 10);
226 223
227 mutex_lock(&data->update_lock); 224 mutex_lock(&data->update_lock);
228 data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); 225 data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
229 vt8231_write_value(client, regvoltmin[nr], data->in_min[nr]); 226 vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
230 mutex_unlock(&data->update_lock); 227 mutex_unlock(&data->update_lock);
231 return count; 228 return count;
232} 229}
@@ -236,13 +233,12 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
236{ 233{
237 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); 234 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
238 int nr = sensor_attr->index; 235 int nr = sensor_attr->index;
239 struct i2c_client *client = to_i2c_client(dev); 236 struct vt8231_data *data = dev_get_drvdata(dev);
240 struct vt8231_data *data = i2c_get_clientdata(client);
241 unsigned long val = simple_strtoul(buf, NULL, 10); 237 unsigned long val = simple_strtoul(buf, NULL, 10);
242 238
243 mutex_lock(&data->update_lock); 239 mutex_lock(&data->update_lock);
244 data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255); 240 data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
245 vt8231_write_value(client, regvoltmax[nr], data->in_max[nr]); 241 vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
246 mutex_unlock(&data->update_lock); 242 mutex_unlock(&data->update_lock);
247 return count; 243 return count;
248} 244}
@@ -278,14 +274,13 @@ static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
278static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr, 274static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
279 const char *buf, size_t count) 275 const char *buf, size_t count)
280{ 276{
281 struct i2c_client *client = to_i2c_client(dev); 277 struct vt8231_data *data = dev_get_drvdata(dev);
282 struct vt8231_data *data = i2c_get_clientdata(client);
283 unsigned long val = simple_strtoul(buf, NULL, 10); 278 unsigned long val = simple_strtoul(buf, NULL, 10);
284 279
285 mutex_lock(&data->update_lock); 280 mutex_lock(&data->update_lock);
286 data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 281 data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
287 0, 255); 282 0, 255);
288 vt8231_write_value(client, regvoltmin[5], data->in_min[5]); 283 vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
289 mutex_unlock(&data->update_lock); 284 mutex_unlock(&data->update_lock);
290 return count; 285 return count;
291} 286}
@@ -293,14 +288,13 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
293static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr, 288static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
294 const char *buf, size_t count) 289 const char *buf, size_t count)
295{ 290{
296 struct i2c_client *client = to_i2c_client(dev); 291 struct vt8231_data *data = dev_get_drvdata(dev);
297 struct vt8231_data *data = i2c_get_clientdata(client);
298 unsigned long val = simple_strtoul(buf, NULL, 10); 292 unsigned long val = simple_strtoul(buf, NULL, 10);
299 293
300 mutex_lock(&data->update_lock); 294 mutex_lock(&data->update_lock);
301 data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3, 295 data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
302 0, 255); 296 0, 255);
303 vt8231_write_value(client, regvoltmax[5], data->in_max[5]); 297 vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
304 mutex_unlock(&data->update_lock); 298 mutex_unlock(&data->update_lock);
305 return count; 299 return count;
306} 300}
@@ -348,26 +342,24 @@ static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr,
348static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr, 342static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
349 const char *buf, size_t count) 343 const char *buf, size_t count)
350{ 344{
351 struct i2c_client *client = to_i2c_client(dev); 345 struct vt8231_data *data = dev_get_drvdata(dev);
352 struct vt8231_data *data = i2c_get_clientdata(client);
686 struct vt8231_data *data; 682 struct vt8231_data *data;
687 int err = 0, i; 683 int err = 0, i;
688 u16 val;
689
690 /* 8231 requires multiple of 256 */
691 if (force_addr) {
692 isa_address = force_addr & 0xFF00;
693 dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n",
694 isa_address);
695 if (PCIBIOS_SUCCESSFUL != pci_write_config_word(s_bridge,
696 VT8231_BASE_REG, isa_address))
697 return -ENODEV;
698 }
699
700 if (PCIBIOS_SUCCESSFUL !=
701 pci_read_config_word(s_bridge, VT8231_ENABLE_REG, &val))
702 return -ENODEV;
703
704 if (!(val & 0x0001)) {
705 dev_warn(&adapter->dev, "enabling sensors\n");
706 if (PCIBIOS_SUCCESSFUL !=
707 pci_write_config_word(s_bridge, VT8231_ENABLE_REG,
708 val | 0x0001))
709 return -ENODEV;
710 }
711 684
712 /* Reserve the ISA region */ 685 /* Reserve the ISA region */
713 if (!request_region(isa_address, VT8231_EXTENT, 686 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
714 vt8231_pci_driver.name)) { 687 if (!request_region(res->start, VT8231_EXTENT,
715 dev_err(&adapter->dev, "region 0x%x already in use!\n", 688 vt8231_driver.driver.name)) {
716 isa_address); 689 dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
690 (unsigned long)res->start, (unsigned long)res->end);
717 return -ENODEV; 691 return -ENODEV;
718 } 692 }
719 693
@@ -722,33 +696,23 @@ int vt8231_detect(struct i2c_adapter *adapter)
722 goto exit_release; 696 goto exit_release;
723 } 697 }
724 698
725 client = &data->client; 699 platform_set_drvdata(pdev, data);
726 i2c_set_clientdata(client, data); 700 data->addr = res->start;
727 client->addr = isa_address; 701 data->name = "vt8231";
728 client->adapter = adapter;
729 client->driver = &vt8231_driver;
730
731 /* Fill in the remaining client fields and put into the global list */
732 strlcpy(client->name, "vt8231", I2C_NAME_SIZE);
733 702
734 mutex_init(&data->update_lock); 703 mutex_init(&data->update_lock);
735 704 vt8231_init_device(data);
736 /* Tell the I2C layer a new client has arrived */
737 if ((err = i2c_attach_client(client)))
738 goto exit_free;
739
740 vt8231_init_client(client);
741 705
742 /* Register sysfs hooks */ 706 /* Register sysfs hooks */
743 if ((err = sysfs_create_group(&client->dev.kobj, &vt8231_group))) 707 if ((err = sysfs_create_group(&pdev->dev.kobj, &vt8231_group)))
744 goto exit_detach; 708 goto exit_free;
745 709
746 /* Must update device information to find out the config field */ 710 /* Must update device information to find out the config field */
747 data->uch_config = vt8231_read_value(client, VT8231_REG_UCH_CONFIG); 711 data->uch_config = vt8231_read_value(data, VT8231_REG_UCH_CONFIG);
748 712
749 for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) { 713 for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) {
750 if (ISTEMP(i, data->uch_config)) { 714 if (ISTEMP(i, data->uch_config)) {
751 if ((err = sysfs_create_group(&client->dev.kobj, 715 if ((err = sysfs_create_group(&pdev->dev.kobj,
752 &vt8231_group_temps[i]))) 716 &vt8231_group_temps[i])))
753 goto exit_remove_files; 717 goto exit_remove_files;
754 } 718 }
@@ -756,13 +720,13 @@ int vt8231_detect(struct i2c_adapter *adapter)
756 720
757 for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) { 721 for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) {
758 if (ISVOLT(i, data->uch_config)) { 722 if (ISVOLT(i, data->uch_config)) {
759 if ((err = sysfs_create_group(&client->dev.kobj, 723 if ((err = sysfs_create_group(&pdev->dev.kobj,
760 &vt8231_group_volts[i]))) 724 &vt8231_group_volts[i])))
761 goto exit_remove_files; 725 goto exit_remove_files;
762 } 726 }
763 } 727 }
764 728
765 data->class_dev = hwmon_device_register(&client->dev); 729 data->class_dev = hwmon_device_register(&pdev->dev);
766 if (IS_ERR(data->class_dev)) { 730 if (IS_ERR(data->class_dev)) {
767 err = PTR_ERR(data->class_dev); 731 err = PTR_ERR(data->class_dev);
768 goto exit_remove_files; 732 goto exit_remove_files;
@@ -771,56 +735,51 @@ int vt8231_detect(struct i2c_adapter *adapter)
771 735
772exit_remove_files: 736exit_remove_files:
773 for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) 737 for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++)
774 sysfs_remove_group(&client->dev.kobj, &vt8231_group_volts[i]); 738 sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]);
775 739
776 for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) 740 for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++)
777 sysfs_remove_group(&client->dev.kobj, &vt8231_group_temps[i]); 741 sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]);
742
743 sysfs_remove_group(&pdev->dev.kobj, &vt8231_group);
778 744
779 sysfs_remove_group(&client->dev.kobj, &vt8231_group);
780exit_detach:
781 i2c_detach_client(client);
782exit_free: 745exit_free:
783 kfree(data); 746 kfree(data);
747
784exit_release: 748exit_release:
785 release_region(isa_address, VT8231_EXTENT); 749 release_region(res->start, VT8231_EXTENT);
786 return err; 750 return err;
787} 751}
788 752
789static int vt8231_detach_client(struct i2c_client *client) 753static int vt8231_remove(struct platform_device *pdev)
790{ 754{
791 struct vt8231_data *data = i2c_get_clientdata(client); 755 struct vt8231_data *data = platform_get_drvdata(pdev);
792 int err, i; 756 int i;
793 757
794 hwmon_device_unregister(data->class_dev); 758 hwmon_device_unregister(data->class_dev);
795 759
796 for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++) 760 for (i = 0; i < ARRAY_SIZE(vt8231_group_volts); i++)
797 sysfs_remove_group(&client->dev.kobj, &vt8231_group_volts[i]); 761 sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_volts[i]);
798 762
799 for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++) 763 for (i = 0; i < ARRAY_SIZE(vt8231_group_temps); i++)
800 sysfs_remove_group(&client->dev.kobj, &vt8231_group_temps[i]); 764 sysfs_remove_group(&pdev->dev.kobj, &vt8231_group_temps[i]);
801 765
802 sysfs_remove_group(&client->dev.kobj, &vt8231_group); 766 sysfs_remove_group(&pdev->dev.kobj, &vt8231_group);
803 767
804 if ((err = i2c_detach_client(client))) { 768 release_region(data->addr, VT8231_EXTENT);
805 return err; 769 platform_set_drvdata(pdev, NULL);
806 }
807
808 release_region(client->addr, VT8231_EXTENT);
809 kfree(data); 770 kfree(data);
810
811 return 0; 771 return 0;
812} 772}
813 773
814static void vt8231_init_client(struct i2c_client *client) 774static void vt8231_init_device(struct vt8231_data *data)
815{ 775{
816 vt8231_write_value(client, VT8231_REG_TEMP1_CONFIG, 0); 776 vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
817 vt8231_write_value(client, VT8231_REG_TEMP2_CONFIG, 0); 777 vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
818} 778}
819 779
820static struct vt8231_data *vt8231_update_device(struct device *dev) 780static struct vt8231_data *vt8231_update_device(struct device *dev)
821{ 781{
822 struct i2c_client *client = to_i2c_client(dev); 782 struct vt8231_data *data = dev_get_drvdata(dev);
823 struct vt8231_data *data = i2c_get_clientdata(client);
824 int i; 783 int i;
825 u16 low; 784 u16 low;
826 785
@@ -830,41 +789,41 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
830 || !data->valid) { 789 || !data->valid) {
831 for (i = 0; i < 6; i++) { 790 for (i = 0; i < 6; i++) {
832 if (ISVOLT(i, data->uch_config)) { 791 if (ISVOLT(i, data->uch_config)) {
833 data->in[i] = vt8231_read_value(client, 792 data->in[i] = vt8231_read_value(data,
834 regvolt[i]); 793 regvolt[i]);
835 data->in_min[i] = vt8231_read_value(client, 794 data->in_min[i] = vt8231_read_value(data,
836 regvoltmin[i]); 795 regvoltmin[i]);
837 data->in_max[i] = vt8231_read_value(client, 796 data->in_max[i] = vt8231_read_value(data,
838 regvoltmax[i]); 797 regvoltmax[i]);
839 } 798 }
840 } 799 }
841 for (i = 0; i < 2; i++) { 800 for (i = 0; i < 2; i++) {
842 data->fan[i] = vt8231_read_value(client, 801 data->fan[i] = vt8231_read_value(data,
843 VT8231_REG_FAN(i)); 802 VT8231_REG_FAN(i));
844 data->fan_min[i] = vt8231_read_value(client, 803 data->fan_min[i] = vt8231_read_value(data,
845 VT8231_REG_FAN_MIN(i)); 804 VT8231_REG_FAN_MIN(i));
846 } 805 }
847 806
848 low = vt8231_read_value(client, VT8231_REG_TEMP_LOW01); 807 low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
849 low = (low >> 6) | ((low & 0x30) >> 2) 808 low = (low >> 6) | ((low & 0x30) >> 2)
850 | (vt8231_read_value(client, VT8231_REG_TEMP_LOW25) << 4); 809 | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
851 for (i = 0; i < 6; i++) { 810 for (i = 0; i < 6; i++) {
852 if (ISTEMP(i, data->uch_config)) { 811 if (ISTEMP(i, data->uch_config)) {
853 data->temp[i] = (vt8231_read_value(client, 812 data->temp[i] = (vt8231_read_value(data,
854 regtemp[i]) << 2) 813 regtemp[i]) << 2)
855 | ((low >> (2 * i)) & 0x03); 814 | ((low >> (2 * i)) & 0x03);
856 data->temp_max[i] = vt8231_read_value(client, 815 data->temp_max[i] = vt8231_read_value(data,
857 regtempmax[i]); 816 regtempmax[i]);
858 data->temp_min[i] = vt8231_read_value(client, 817 data->temp_min[i] = vt8231_read_value(data,
859 regtempmin[i]); 818 regtempmin[i]);
860 } 819 }
861 } 820 }
862 821
863 i = vt8231_read_value(client, VT8231_REG_FANDIV); 822 i = vt8231_read_value(data, VT8231_REG_FANDIV);
864 data->fan_div[0] = (i >> 4) & 0x03; 823 data->fan_div[0] = (i >> 4) & 0x03;
865 data->fan_div[1] = i >> 6; 824 data->fan_div[1] = i >> 6;
866 data->alarms = vt8231_read_value(client, VT8231_REG_ALARM1) | 825 data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
867 (vt8231_read_value(client, VT8231_REG_ALARM2) << 8); 826 (vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
868 827
869 /* Set alarm flags correctly */ 828 /* Set alarm flags correctly */
870 if (!data->fan[0] && data->fan_min[0]) { 829 if (!data->fan[0] && data->fan_min[0]) {
@@ -888,33 +847,102 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
888 return data; 847 return data;
889} 848}
890 849
850static int __devinit vt8231_device_add(unsigned short address)
851{
852 struct resource res = {
853 .start = address,
854 .end = address + VT8231_EXTENT - 1,
855 .name = "vt8231",
856 .flags = IORESOURCE_IO,
857 };
858 int err;
859
860 pdev = platform_device_alloc("vt8231", address);
861 if (!pdev) {
862 err = -ENOMEM;
863 printk(KERN_ERR "vt8231: Device allocation failed\n");
864 goto exit;
865 }
866
867 err = platform_device_add_resources(pdev, &res, 1);
868 if (err) {
869 printk(KERN_ERR "vt8231: Device resource addition failed "
870 "(%d)\n", err);
871 goto exit_device_put;
872 }
873
874 err = platform_device_add(pdev);
875 if (err) {
876 printk(KERN_ERR "vt8231: Device addition failed (%d)\n",
877 err);
878 goto exit_device_put;
879 }
880
881 return 0;
882
883exit_device_put:
884 platform_device_put(pdev);
885exit:
886 return err;
887}
888
891static int __devinit vt8231_pci_probe(struct pci_dev *dev, 889static int __devinit vt8231_pci_probe(struct pci_dev *dev,
892 const struct pci_device_id *id) 890 const struct pci_device_id *id)
893{ 891{
894 u16 val; 892 u16 address, val;
893 if (force_addr) {
894 address = force_addr & 0xff00;
895 dev_warn(&dev->dev, "Forcing ISA address 0x%x\n",
896 address);
897
898 if (PCIBIOS_SUCCESSFUL !=
899 pci_write_config_word(dev, VT8231_BASE_REG, address | 1))
900 return -ENODEV;
901 }
895 902
896 if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG, 903 if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG,
897 &val)) 904 &val))
898 return -ENODEV; 905 return -ENODEV;
899 906
900 isa_address = val & ~(VT8231_EXTENT - 1); 907 address = val & ~(VT8231_EXTENT - 1);
901 if (isa_address == 0 && force_addr == 0) { 908 if (address == 0) {
902 dev_err(&dev->dev, "base address not set -\ 909 dev_err(&dev->dev, "base address not set -\
903 upgrade BIOS or use force_addr=0xaddr\n"); 910 upgrade BIOS or use force_addr=0xaddr\n");
904 return -ENODEV; 911 return -ENODEV;
905 } 912 }
906 913
907 s_bridge = pci_dev_get(dev); 914 if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_ENABLE_REG,
915 &val))
916 return -ENODEV;
908 917
909 if (i2c_isa_add_driver(&vt8231_driver)) { 918 if (!(val & 0x0001)) {
910 pci_dev_put(s_bridge); 919 dev_warn(&dev->dev, "enabling sensors\n");
911 s_bridge = NULL; 920 if (PCIBIOS_SUCCESSFUL !=
921 pci_write_config_word(dev, VT8231_ENABLE_REG,
922 val | 0x0001))
923 return -ENODEV;
912 } 924 }
913 925
926 if (platform_driver_register(&vt8231_driver))
927 goto exit;
928
929 /* Sets global pdev as a side effect */
930 if (vt8231_device_add(address))
931 goto exit_unregister;
932
914 /* Always return failure here. This is to allow other drivers to bind 933 /* Always return failure here. This is to allow other drivers to bind
915 * to this pci device. We don't really want to have control over the 934 * to this pci device. We don't really want to have control over the
916 * pci device, we only wanted to read as few register values from it. 935 * pci device, we only wanted to read as few register values from it.
917 */ 936 */
937
938 /* We do, however, mark ourselves as using the PCI device to stop it
939 getting unloaded. */
940 s_bridge = pci_dev_get(dev);
941 return -ENODEV;
942
943exit_unregister:
944 platform_driver_unregister(&vt8231_driver);
945exit:
918 return -ENODEV; 946 return -ENODEV;
919} 947}
920 948
@@ -927,7 +955,8 @@ static void __exit sm_vt8231_exit(void)
927{ 955{
928 pci_unregister_driver(&vt8231_pci_driver); 956 pci_unregister_driver(&vt8231_pci_driver);
929 if (s_bridge != NULL) { 957 if (s_bridge != NULL) {
930 i2c_isa_del_driver(&vt8231_driver); 958 platform_device_unregister(pdev);
959 platform_driver_unregister(&vt8231_driver);
931 pci_dev_put(s_bridge); 960 pci_dev_put(s_bridge);
932 s_bridge = NULL; 961 s_bridge = NULL;
933 } 962 }
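
For context, the hunks above register vt8231_driver with platform_driver_register() and reference vt8231_driver.driver.name, but the definition of the driver object itself falls outside the displayed context. A minimal sketch of what that definition would typically look like for a platform driver of this kernel vintage follows; the exact field layout here is an assumption and is not taken from this diff:

/* Sketch only: the object passed to platform_driver_register() in
 * vt8231_pci_probe(); probe/remove names match the functions in this diff. */
static struct platform_driver vt8231_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "vt8231",
	},
	.probe	= vt8231_probe,
	.remove	= vt8231_remove,
};

With this conversion the PCI probe is only a detection hook: it reads and optionally enables the sensor base address on the bridge, creates the platform device through vt8231_device_add(), and the I/O range is then picked up again in vt8231_probe() via platform_get_resource().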