author     Georgi Djakov <georgi.djakov@linaro.org>         2019-01-16 11:10:56 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-01-22 07:37:25 -0500
commit     11f1ceca7031deefc1a34236ab7b94360016b71d (patch)
tree       36f1f8c883a4d268b8b9e5c55d0b22e85cda8a8e
parent     2ca46ed207d5d4e3c7a183fe11e8a2d02f86e7c6 (diff)
interconnect: Add generic on-chip interconnect API
This patch introduces a new API to get requirements and configure the
interconnect buses across the entire chipset to fit with the current demand.

The API uses a consumer/provider-based model, where the providers are the
interconnect buses and the consumers could be various drivers. The consumers
request interconnect resources (paths) between endpoints and set the desired
constraints on this data flow path. The providers receive requests from
consumers and aggregate these requests for all master-slave pairs on that
path. Then the providers configure each node along the path to support a
bandwidth that satisfies all bandwidth requests that cross through that node.
The topology could be complicated and multi-tiered and is SoC specific.

Reviewed-by: Evan Green <evgreen@chromium.org>
Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
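For illustration, the consumer-facing calls added by this patch (icc_get(),
icc_set_bw(), icc_put() and the MBps_to_icc() unit macro) would typically be
used as in the following minimal sketch. The device, the endpoint IDs and the
function name are hypothetical and are not part of this patch:

#include <linux/err.h>
#include <linux/interconnect.h>

#define MASTER_VDEC	1	/* made-up source endpoint id */
#define SLAVE_DDR	2	/* made-up destination endpoint id */

static int vdec_request_bandwidth(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* look up a path between the two endpoints */
	path = icc_get(dev, MASTER_VDEC, SLAVE_DDR);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* request 500 MB/s average and 800 MB/s peak bandwidth (in kBps) */
	ret = icc_set_bw(path, MBps_to_icc(500), MBps_to_icc(800));
	if (ret) {
		icc_put(path);
		return ret;
	}

	/* ... later, drop the request while keeping the path handle ... */
	icc_set_bw(path, 0, 0);

	/* release the path when it is no longer needed */
	icc_put(path);

	return 0;
}

Note that when CONFIG_INTERCONNECT is disabled, icc_get() returns NULL and
icc_set_bw()/icc_put() become no-ops, so consumer code like this still builds
and runs without the framework.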
-rw-r--r--  Documentation/interconnect/interconnect.rst  |  94
-rw-r--r--  drivers/Kconfig                               |   2
-rw-r--r--  drivers/Makefile                              |   1
-rw-r--r--  drivers/interconnect/Kconfig                  |  10
-rw-r--r--  drivers/interconnect/Makefile                 |   5
-rw-r--r--  drivers/interconnect/core.c                   | 567
-rw-r--r--  include/linux/interconnect-provider.h         | 125
-rw-r--r--  include/linux/interconnect.h                  |  52
8 files changed, 856 insertions, 0 deletions
diff --git a/Documentation/interconnect/interconnect.rst b/Documentation/interconnect/interconnect.rst
new file mode 100644
index 000000000000..b8107dcc4cd3
--- /dev/null
+++ b/Documentation/interconnect/interconnect.rst
@@ -0,0 +1,94 @@
1.. SPDX-License-Identifier: GPL-2.0
2
3=====================================
4GENERIC SYSTEM INTERCONNECT SUBSYSTEM
5=====================================
6
7Introduction
8------------
9
10This framework is designed to provide a standard kernel interface to control
11the settings of the interconnects on an SoC. These settings include the
12throughput, latency and priority between multiple interconnected devices or
13functional blocks. They can be adjusted dynamically in order to save power or
14to provide maximum performance.
15
16An interconnect bus is hardware with configurable parameters, which can be
17set on a data path according to the requests received from various drivers.
18Examples of interconnect buses are the interconnects between various
19components or functional blocks in chipsets. There can be multiple
20interconnects on an SoC, and they can be multi-tiered.
21
22Below is a simplified diagram of a real-world SoC interconnect bus topology.
23
24::
25
26 +----------------+ +----------------+
27 | HW Accelerator |--->| M NoC |<---------------+
28 +----------------+ +----------------+ |
29 | | +------------+
30 +-----+ +-------------+ V +------+ | |
31 | DDR | | +--------+ | PCIe | | |
32 +-----+ | | Slaves | +------+ | |
33 ^ ^ | +--------+ | | C NoC |
34 | | V V | |
35 +------------------+ +------------------------+ | | +-----+
36 | |-->| |-->| |-->| CPU |
37 | |-->| |<--| | +-----+
38 | Mem NoC | | S NoC | +------------+
39 | |<--| |---------+ |
40 | |<--| |<------+ | | +--------+
41 +------------------+ +------------------------+ | | +-->| Slaves |
42 ^ ^ ^ ^ ^ | | +--------+
43 | | | | | | V
44 +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
45 | CPUs | | | GPU | | DSP | | Masters |-->| P NoC |-->| Slaves |
46 +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+
47 |
48 +-------+
49 | Modem |
50 +-------+
51
52Terminology
53-----------
54
55Interconnect provider is the software definition of the interconnect hardware.
56The interconnect providers on the above diagram are M NoC, S NoC, C NoC, P NoC
57and Mem NoC.
58
59Interconnect node is the software definition of the interconnect hardware
60port. Each interconnect provider consists of multiple interconnect nodes,
61which are connected to other SoC components including other interconnect
62providers. The point on the diagram where the CPUs connect to the memory is
63called an interconnect node, which belongs to the Mem NoC interconnect provider.
64
65Interconnect endpoints are the first or the last element of the path. Every
66endpoint is a node, but not every node is an endpoint.
67
68Interconnect path is everything between two endpoints, including all the nodes
69that have to be traversed to reach from a source to a destination node. It may
70include multiple master-slave pairs across several interconnect providers.
71
72Interconnect consumers are the entities which make use of the data paths exposed
73by the providers. The consumers send requests to providers, specifying the
74throughput, latency and priority they need. Usually the consumers are device
75drivers that send requests based on their needs. An example of a consumer is a
76video decoder that supports various formats and image sizes.
77
78Interconnect providers
79----------------------
80
81Interconnect provider is an entity that implements methods to initialize and
82configure interconnect bus hardware. The interconnect provider drivers should
83be registered with the interconnect provider core.
84
85.. kernel-doc:: include/linux/interconnect-provider.h
86
87Interconnect consumers
88----------------------
89
90Interconnect consumers are the clients which use the interconnect APIs to
91get paths between endpoints and set their bandwidth/latency/QoS requirements
92for these interconnect paths.
93
94.. kernel-doc:: include/linux/interconnect.h
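The provider side described in the documentation above registers its topology
with the core using the helpers declared in include/linux/interconnect-provider.h,
which is added later in this patch. Below is a minimal, hypothetical sketch;
the "mysoc" names, node IDs and callback bodies are invented for illustration,
and error unwinding is elided:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define MYSOC_MASTER_CPU	1	/* made-up platform node id */
#define MYSOC_SLAVE_DDR		2	/* made-up platform node id */

static int mysoc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	/* program the hardware with the aggregated avg_bw/peak_bw of the nodes */
	return 0;
}

static int mysoc_icc_aggregate(struct icc_node *node, u32 avg_bw, u32 peak_bw,
			       u32 *agg_avg, u32 *agg_peak)
{
	/* sum the averages, track the highest peak across all requests */
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}

static int mysoc_register_icc(struct device *dev)
{
	struct icc_provider *provider;
	struct icc_node *cpu, *ddr;
	int ret;

	provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->dev = dev;
	provider->set = mysoc_icc_set;
	provider->aggregate = mysoc_icc_aggregate;

	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	/* create two nodes and add them to this provider */
	cpu = icc_node_create(MYSOC_MASTER_CPU);
	ddr = icc_node_create(MYSOC_SLAVE_DDR);
	if (IS_ERR(cpu) || IS_ERR(ddr))
		return -ENOMEM;	/* error unwinding elided for brevity */

	icc_node_add(cpu, provider);
	icc_node_add(ddr, provider);

	/* the CPU master can reach the DDR slave */
	return icc_link_create(cpu, MYSOC_SLAVE_DDR);
}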
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4f9f99057ff8..45f9decb9848 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -228,4 +228,6 @@ source "drivers/siox/Kconfig"
228
229source "drivers/slimbus/Kconfig"
230
231source "drivers/interconnect/Kconfig"
232
233endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index e1ce029d28fd..bb15b9d0e793 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/
186obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
187obj-$(CONFIG_SIOX) += siox/
188obj-$(CONFIG_GNSS) += gnss/
189obj-$(CONFIG_INTERCONNECT) += interconnect/
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
new file mode 100644
index 000000000000..a261c7d41deb
--- /dev/null
+++ b/drivers/interconnect/Kconfig
@@ -0,0 +1,10 @@
1menuconfig INTERCONNECT
2 tristate "On-Chip Interconnect management support"
3 help
4 Support for management of the on-chip interconnects.
5
6 This framework is designed to provide a generic interface for
7 managing the interconnects in an SoC.
8
9 If unsure, say no.
10
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
new file mode 100644
index 000000000000..7a01f33b5593
--- /dev/null
+++ b/drivers/interconnect/Makefile
@@ -0,0 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0
2
3icc-core-objs := core.o
4
5obj-$(CONFIG_INTERCONNECT) += icc-core.o
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
new file mode 100644
index 000000000000..2b937b4f43c4
--- /dev/null
+++ b/drivers/interconnect/core.c
@@ -0,0 +1,567 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Interconnect framework core driver
4 *
5 * Copyright (c) 2017-2019, Linaro Ltd.
6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
7 */
8
9#include <linux/device.h>
10#include <linux/idr.h>
11#include <linux/init.h>
12#include <linux/interconnect.h>
13#include <linux/interconnect-provider.h>
14#include <linux/list.h>
15#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/slab.h>
18#include <linux/overflow.h>
19
20static DEFINE_IDR(icc_idr);
21static LIST_HEAD(icc_providers);
22static DEFINE_MUTEX(icc_lock);
23
24/**
25 * struct icc_req - constraints that are attached to each node
26 * @req_node: entry in list of requests for the particular @node
27 * @node: the interconnect node to which this constraint applies
28 * @dev: reference to the device that sets the constraints
29 * @avg_bw: an integer describing the average bandwidth in kBps
30 * @peak_bw: an integer describing the peak bandwidth in kBps
31 */
32struct icc_req {
33 struct hlist_node req_node;
34 struct icc_node *node;
35 struct device *dev;
36 u32 avg_bw;
37 u32 peak_bw;
38};
39
40/**
41 * struct icc_path - interconnect path structure
42 * @num_nodes: number of hops (nodes)
43 * @reqs: array of the requests applicable to this path of nodes
44 */
45struct icc_path {
46 size_t num_nodes;
47 struct icc_req reqs[];
48};
49
50static struct icc_node *node_find(const int id)
51{
52 return idr_find(&icc_idr, id);
53}
54
55static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
56 ssize_t num_nodes)
57{
58 struct icc_node *node = dst;
59 struct icc_path *path;
60 int i;
61
62 path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
63 if (!path)
64 return ERR_PTR(-ENOMEM);
65
66 path->num_nodes = num_nodes;
67
68 for (i = num_nodes - 1; i >= 0; i--) {
69 node->provider->users++;
70 hlist_add_head(&path->reqs[i].req_node, &node->req_list);
71 path->reqs[i].node = node;
72 path->reqs[i].dev = dev;
73 /* reference to previous node was saved during path traversal */
74 node = node->reverse;
75 }
76
77 return path;
78}
79
80static struct icc_path *path_find(struct device *dev, struct icc_node *src,
81 struct icc_node *dst)
82{
83 struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
84 struct icc_node *n, *node = NULL;
85 struct list_head traverse_list;
86 struct list_head edge_list;
87 struct list_head visited_list;
88 size_t i, depth = 1;
89 bool found = false;
90
91 INIT_LIST_HEAD(&traverse_list);
92 INIT_LIST_HEAD(&edge_list);
93 INIT_LIST_HEAD(&visited_list);
94
95 list_add(&src->search_list, &traverse_list);
96 src->reverse = NULL;
97
98 do {
99 list_for_each_entry_safe(node, n, &traverse_list, search_list) {
100 if (node == dst) {
101 found = true;
102 list_splice_init(&edge_list, &visited_list);
103 list_splice_init(&traverse_list, &visited_list);
104 break;
105 }
106 for (i = 0; i < node->num_links; i++) {
107 struct icc_node *tmp = node->links[i];
108
109 if (!tmp) {
110 path = ERR_PTR(-ENOENT);
111 goto out;
112 }
113
114 if (tmp->is_traversed)
115 continue;
116
117 tmp->is_traversed = true;
118 tmp->reverse = node;
119 list_add_tail(&tmp->search_list, &edge_list);
120 }
121 }
122
123 if (found)
124 break;
125
126 list_splice_init(&traverse_list, &visited_list);
127 list_splice_init(&edge_list, &traverse_list);
128
129 /* count the hops including the source */
130 depth++;
131
132 } while (!list_empty(&traverse_list));
133
134out:
135
136 /* reset the traversed state */
137 list_for_each_entry_reverse(n, &visited_list, search_list)
138 n->is_traversed = false;
139
140 if (found)
141 path = path_init(dev, dst, depth);
142
143 return path;
144}
145
146/*
147 * We want the path to honor all bandwidth requests, so the average and peak
148 * bandwidth requirements from each consumer are aggregated at each node.
149 * The aggregation is platform specific, so each platform can customize it by
150 * implementing its own aggregate() function.
151 */
152
153static int aggregate_requests(struct icc_node *node)
154{
155 struct icc_provider *p = node->provider;
156 struct icc_req *r;
157
158 node->avg_bw = 0;
159 node->peak_bw = 0;
160
161 hlist_for_each_entry(r, &node->req_list, req_node)
162 p->aggregate(node, r->avg_bw, r->peak_bw,
163 &node->avg_bw, &node->peak_bw);
164
165 return 0;
166}
167
168static int apply_constraints(struct icc_path *path)
169{
170 struct icc_node *next, *prev = NULL;
171 int ret = -EINVAL;
172 int i;
173
174 for (i = 0; i < path->num_nodes; i++) {
175 next = path->reqs[i].node;
176
177 /*
178 * Both endpoints should be valid master-slave pairs of the
179 * same interconnect provider that will be configured.
180 */
181 if (!prev || next->provider != prev->provider) {
182 prev = next;
183 continue;
184 }
185
186 /* set the constraints */
187 ret = next->provider->set(prev, next);
188 if (ret)
189 goto out;
190
191 prev = next;
192 }
193out:
194 return ret;
195}
196
197/**
198 * icc_set_bw() - set bandwidth constraints on an interconnect path
199 * @path: reference to the path returned by icc_get()
200 * @avg_bw: average bandwidth in kilobytes per second
201 * @peak_bw: peak bandwidth in kilobytes per second
202 *
203 * This function is used by an interconnect consumer to express its own needs
204 * in terms of bandwidth for a previously requested path between two endpoints.
205 * The requests are aggregated and each node is updated accordingly. The entire
206 * path is locked by a mutex to ensure that the set() is completed.
207 * The @path can be NULL when the "interconnects" DT property is missing,
208 * which means that no constraints will be set.
209 *
210 * Returns 0 on success, or an appropriate error code otherwise.
211 */
212int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
213{
214 struct icc_node *node;
215 size_t i;
216 int ret;
217
218 if (!path)
219 return 0;
220
221 mutex_lock(&icc_lock);
222
223 for (i = 0; i < path->num_nodes; i++) {
224 node = path->reqs[i].node;
225
226 /* update the consumer request for this path */
227 path->reqs[i].avg_bw = avg_bw;
228 path->reqs[i].peak_bw = peak_bw;
229
230 /* aggregate requests for this node */
231 aggregate_requests(node);
232 }
233
234 ret = apply_constraints(path);
235 if (ret)
236 pr_debug("interconnect: error applying constraints (%d)\n",
237 ret);
238
239 mutex_unlock(&icc_lock);
240
241 return ret;
242}
243EXPORT_SYMBOL_GPL(icc_set_bw);
244
245/**
246 * icc_get() - return a handle for path between two endpoints
247 * @dev: the device requesting the path
248 * @src_id: source device port id
249 * @dst_id: destination device port id
250 *
251 * This function will search for a path between two endpoints and return an
252 * icc_path handle on success. Use icc_put() to release
253 * constraints when they are not needed anymore.
254 * If the interconnect API is disabled, NULL is returned and the consumer
255 * drivers will still build. Drivers are free to handle this specifically,
256 * but they don't have to.
257 *
258 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
259 * interconnect API is disabled.
260 */
261struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
262{
263 struct icc_node *src, *dst;
264 struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
265
266 mutex_lock(&icc_lock);
267
268 src = node_find(src_id);
269 if (!src)
270 goto out;
271
272 dst = node_find(dst_id);
273 if (!dst)
274 goto out;
275
276 path = path_find(dev, src, dst);
277 if (IS_ERR(path))
278 dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
279
280out:
281 mutex_unlock(&icc_lock);
282 return path;
283}
284EXPORT_SYMBOL_GPL(icc_get);
285
286/**
287 * icc_put() - release the reference to the icc_path
288 * @path: interconnect path
289 *
290 * Use this function to release the constraints on a path when the path is
291 * no longer needed. The constraints will be re-aggregated.
292 */
293void icc_put(struct icc_path *path)
294{
295 struct icc_node *node;
296 size_t i;
297 int ret;
298
299 if (!path || WARN_ON(IS_ERR(path)))
300 return;
301
302 ret = icc_set_bw(path, 0, 0);
303 if (ret)
304 pr_err("%s: error (%d)\n", __func__, ret);
305
306 mutex_lock(&icc_lock);
307 for (i = 0; i < path->num_nodes; i++) {
308 node = path->reqs[i].node;
309 hlist_del(&path->reqs[i].req_node);
310 if (!WARN_ON(!node->provider->users))
311 node->provider->users--;
312 }
313 mutex_unlock(&icc_lock);
314
315 kfree(path);
316}
317EXPORT_SYMBOL_GPL(icc_put);
318
319static struct icc_node *icc_node_create_nolock(int id)
320{
321 struct icc_node *node;
322
323 /* check if node already exists */
324 node = node_find(id);
325 if (node)
326 return node;
327
328 node = kzalloc(sizeof(*node), GFP_KERNEL);
329 if (!node)
330 return ERR_PTR(-ENOMEM);
331
332 id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
333 if (id < 0) {
334 WARN(1, "%s: couldn't get idr\n", __func__);
335 kfree(node);
336 return ERR_PTR(id);
337 }
338
339 node->id = id;
340
341 return node;
342}
343
344/**
345 * icc_node_create() - create a node
346 * @id: node id
347 *
348 * Return: icc_node pointer on success, or ERR_PTR() on error
349 */
350struct icc_node *icc_node_create(int id)
351{
352 struct icc_node *node;
353
354 mutex_lock(&icc_lock);
355
356 node = icc_node_create_nolock(id);
357
358 mutex_unlock(&icc_lock);
359
360 return node;
361}
362EXPORT_SYMBOL_GPL(icc_node_create);
363
364/**
365 * icc_node_destroy() - destroy a node
366 * @id: node id
367 */
368void icc_node_destroy(int id)
369{
370 struct icc_node *node;
371
372 mutex_lock(&icc_lock);
373
374 node = node_find(id);
375 if (node) {
376 idr_remove(&icc_idr, node->id);
377 WARN_ON(!hlist_empty(&node->req_list));
378 }
379
380 mutex_unlock(&icc_lock);
381
382 kfree(node);
383}
384EXPORT_SYMBOL_GPL(icc_node_destroy);
385
386/**
387 * icc_link_create() - create a link between two nodes
388 * @node: pointer to the source node
389 * @dst_id: destination node id
390 *
391 * Create a link between two nodes. The nodes might belong to different
392 * interconnect providers and the @dst_id node might not exist (if the
393 * provider driver has not probed yet). So just create the @dst_id node
394 * and when the actual provider driver is probed, the rest of the node
395 * data is filled.
396 *
397 * Return: 0 on success, or an error code otherwise
398 */
399int icc_link_create(struct icc_node *node, const int dst_id)
400{
401 struct icc_node *dst;
402 struct icc_node **new;
403 int ret = 0;
404
405 if (!node->provider)
406 return -EINVAL;
407
408 mutex_lock(&icc_lock);
409
410 dst = node_find(dst_id);
411 if (!dst) {
412 dst = icc_node_create_nolock(dst_id);
413
414 if (IS_ERR(dst)) {
415 ret = PTR_ERR(dst);
416 goto out;
417 }
418 }
419
420 new = krealloc(node->links,
421 (node->num_links + 1) * sizeof(*node->links),
422 GFP_KERNEL);
423 if (!new) {
424 ret = -ENOMEM;
425 goto out;
426 }
427
428 node->links = new;
429 node->links[node->num_links++] = dst;
430
431out:
432 mutex_unlock(&icc_lock);
433
434 return ret;
435}
436EXPORT_SYMBOL_GPL(icc_link_create);
437
438/**
439 * icc_link_destroy() - destroy a link between two nodes
440 * @src: pointer to source node
441 * @dst: pointer to destination node
442 *
443 * Return: 0 on success, or an error code otherwise
444 */
445int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
446{
447 struct icc_node **new;
448 size_t slot;
449 int ret = 0;
450
451 if (IS_ERR_OR_NULL(src))
452 return -EINVAL;
453
454 if (IS_ERR_OR_NULL(dst))
455 return -EINVAL;
456
457 mutex_lock(&icc_lock);
458
459 for (slot = 0; slot < src->num_links; slot++)
460 if (src->links[slot] == dst)
461 break;
462
463 if (WARN_ON(slot == src->num_links)) {
464 ret = -ENXIO;
465 goto out;
466 }
467
468 src->links[slot] = src->links[--src->num_links];
469
470 new = krealloc(src->links, src->num_links * sizeof(*src->links),
471 GFP_KERNEL);
472 if (new)
473 src->links = new;
474
475out:
476 mutex_unlock(&icc_lock);
477
478 return ret;
479}
480EXPORT_SYMBOL_GPL(icc_link_destroy);
481
482/**
483 * icc_node_add() - add interconnect node to interconnect provider
484 * @node: pointer to the interconnect node
485 * @provider: pointer to the interconnect provider
486 */
487void icc_node_add(struct icc_node *node, struct icc_provider *provider)
488{
489 mutex_lock(&icc_lock);
490
491 node->provider = provider;
492 list_add_tail(&node->node_list, &provider->nodes);
493
494 mutex_unlock(&icc_lock);
495}
496EXPORT_SYMBOL_GPL(icc_node_add);
497
498/**
499 * icc_node_del() - delete interconnect node from interconnect provider
500 * @node: pointer to the interconnect node
501 */
502void icc_node_del(struct icc_node *node)
503{
504 mutex_lock(&icc_lock);
505
506 list_del(&node->node_list);
507
508 mutex_unlock(&icc_lock);
509}
510EXPORT_SYMBOL_GPL(icc_node_del);
511
512/**
513 * icc_provider_add() - add a new interconnect provider
514 * @provider: the interconnect provider that will be added into topology
515 *
516 * Return: 0 on success, or an error code otherwise
517 */
518int icc_provider_add(struct icc_provider *provider)
519{
520 if (WARN_ON(!provider->set))
521 return -EINVAL;
522
523 mutex_lock(&icc_lock);
524
525 INIT_LIST_HEAD(&provider->nodes);
526 list_add_tail(&provider->provider_list, &icc_providers);
527
528 mutex_unlock(&icc_lock);
529
530 dev_dbg(provider->dev, "interconnect provider added to topology\n");
531
532 return 0;
533}
534EXPORT_SYMBOL_GPL(icc_provider_add);
535
536/**
537 * icc_provider_del() - delete previously added interconnect provider
538 * @provider: the interconnect provider that will be removed from topology
539 *
540 * Return: 0 on success, or an error code otherwise
541 */
542int icc_provider_del(struct icc_provider *provider)
543{
544 mutex_lock(&icc_lock);
545 if (provider->users) {
546 pr_warn("interconnect provider still has %d users\n",
547 provider->users);
548 mutex_unlock(&icc_lock);
549 return -EBUSY;
550 }
551
552 if (!list_empty(&provider->nodes)) {
553 pr_warn("interconnect provider still has nodes\n");
554 mutex_unlock(&icc_lock);
555 return -EBUSY;
556 }
557
558 list_del(&provider->provider_list);
559 mutex_unlock(&icc_lock);
560
561 return 0;
562}
563EXPORT_SYMBOL_GPL(icc_provider_del);
564
565MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
566MODULE_DESCRIPTION("Interconnect Driver Core");
567MODULE_LICENSE("GPL v2");
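Since icc_get() in the core above returns ERR_PTR(-EPROBE_DEFER) when an
endpoint node has not been registered yet (for example because the provider
driver has not probed), consumers normally just propagate that error from
their probe() routine. A hypothetical sketch, with invented names and IDs,
not part of this patch:

#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define MYDEV_MASTER	3	/* made-up endpoint ids */
#define MYDEV_SLAVE	4

struct mydev_priv {
	struct icc_path *path;
};

static int mydev_probe(struct platform_device *pdev)
{
	struct mydev_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * icc_get() returns ERR_PTR(-EPROBE_DEFER) until both endpoint
	 * nodes have been registered by their provider, so propagating
	 * the error lets the driver core retry probing later.
	 */
	priv->path = icc_get(&pdev->dev, MYDEV_MASTER, MYDEV_SLAVE);
	if (IS_ERR(priv->path))
		return PTR_ERR(priv->path);

	platform_set_drvdata(pdev, priv);

	return 0;
}

static int mydev_remove(struct platform_device *pdev)
{
	struct mydev_priv *priv = platform_get_drvdata(pdev);

	/* drop the constraints and release the path */
	icc_put(priv->path);

	return 0;
}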
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..78208a754181
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,125 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018, Linaro Ltd.
4 * Author: Georgi Djakov <georgi.djakov@linaro.org>
5 */
6
7#ifndef __LINUX_INTERCONNECT_PROVIDER_H
8#define __LINUX_INTERCONNECT_PROVIDER_H
9
10#include <linux/interconnect.h>
11
12#define icc_units_to_bps(bw) ((bw) * 1000ULL)
13
14struct icc_node;
15
16/**
17 * struct icc_provider - interconnect provider (controller) entity that might
18 * provide multiple interconnect controls
19 *
20 * @provider_list: list of the registered interconnect providers
21 * @nodes: internal list of the interconnect provider nodes
22 * @set: pointer to device specific set operation function
23 * @aggregate: pointer to device specific aggregate operation function
24 * @dev: the device this interconnect provider belongs to
25 * @users: count of active users
26 * @data: pointer to private data
27 */
28struct icc_provider {
29 struct list_head provider_list;
30 struct list_head nodes;
31 int (*set)(struct icc_node *src, struct icc_node *dst);
32 int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw,
33 u32 *agg_avg, u32 *agg_peak);
34 struct device *dev;
35 int users;
36 void *data;
37};
38
39/**
40 * struct icc_node - entity that is part of the interconnect topology
41 *
42 * @id: platform specific node id
43 * @name: node name used in debugfs
44 * @links: a list of targets pointing to where we can go next when traversing
45 * @num_links: number of links to other interconnect nodes
46 * @provider: points to the interconnect provider of this node
47 * @node_list: the list entry in the parent provider's "nodes" list
48 * @search_list: list used when walking the nodes graph
49 * @reverse: pointer to previous node when walking the nodes graph
50 * @is_traversed: flag that is used when walking the nodes graph
51 * @req_list: a list of QoS constraint requests associated with this node
52 * @avg_bw: aggregated value of average bandwidth requests from all consumers
53 * @peak_bw: aggregated value of peak bandwidth requests from all consumers
54 * @data: pointer to private data
55 */
56struct icc_node {
57 int id;
58 const char *name;
59 struct icc_node **links;
60 size_t num_links;
61
62 struct icc_provider *provider;
63 struct list_head node_list;
64 struct list_head search_list;
65 struct icc_node *reverse;
66 u8 is_traversed:1;
67 struct hlist_head req_list;
68 u32 avg_bw;
69 u32 peak_bw;
70 void *data;
71};
72
73#if IS_ENABLED(CONFIG_INTERCONNECT)
74
75struct icc_node *icc_node_create(int id);
76void icc_node_destroy(int id);
77int icc_link_create(struct icc_node *node, const int dst_id);
78int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
79void icc_node_add(struct icc_node *node, struct icc_provider *provider);
80void icc_node_del(struct icc_node *node);
81int icc_provider_add(struct icc_provider *provider);
82int icc_provider_del(struct icc_provider *provider);
83
84#else
85
86static inline struct icc_node *icc_node_create(int id)
87{
88 return ERR_PTR(-ENOTSUPP);
89}
90
91static inline void icc_node_destroy(int id)
92{
93}
94
95static inline int icc_link_create(struct icc_node *node, const int dst_id)
96{
97 return -ENOTSUPP;
98}
99
100static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
101{
102 return -ENOTSUPP;
103}
104
105static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider)
106{
107}
108
109static inline void icc_node_del(struct icc_node *node)
110{
111}
112
113static inline int icc_provider_add(struct icc_provider *provider)
114{
115 return -ENOTSUPP;
116}
117
118static inline int icc_provider_del(struct icc_provider *provider)
119{
120 return -ENOTSUPP;
121}
122
123#endif /* CONFIG_INTERCONNECT */
124
125#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..c331afb3a2c8
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,52 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018-2019, Linaro Ltd.
4 * Author: Georgi Djakov <georgi.djakov@linaro.org>
5 */
6
7#ifndef __LINUX_INTERCONNECT_H
8#define __LINUX_INTERCONNECT_H
9
10#include <linux/mutex.h>
11#include <linux/types.h>
12
13/* macros for converting to icc units */
14#define Bps_to_icc(x) ((x) / 1000)
15#define kBps_to_icc(x) (x)
16#define MBps_to_icc(x) ((x) * 1000)
17#define GBps_to_icc(x) ((x) * 1000 * 1000)
18#define bps_to_icc(x) (1)
19#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
20#define Mbps_to_icc(x) ((x) * 1000 / 8)
21#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
22
23struct icc_path;
24struct device;
25
26#if IS_ENABLED(CONFIG_INTERCONNECT)
27
28struct icc_path *icc_get(struct device *dev, const int src_id,
29 const int dst_id);
30void icc_put(struct icc_path *path);
31int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
32
33#else
34
35static inline struct icc_path *icc_get(struct device *dev, const int src_id,
36 const int dst_id)
37{
38 return NULL;
39}
40
41static inline void icc_put(struct icc_path *path)
42{
43}
44
45static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
46{
47 return 0;
48}
49
50#endif /* CONFIG_INTERCONNECT */
51
52#endif /* __LINUX_INTERCONNECT_H */
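A closing note on units: the framework's native unit is kBps, and the
conversion macros above translate from common byte and bit rates, with
kbps_to_icc() rounding up and bps_to_icc() clamping to a minimum of 1 kBps.
A small hypothetical example of how these macros combine with icc_set_bw()
(the function name and values are invented):

#include <linux/interconnect.h>

static void mydev_set_streaming_bw(struct icc_path *path)
{
	/* 1 Gbps of line rate: 1 * 1000 * 1000 / 8 = 125000 kBps */
	u32 avg = Gbps_to_icc(1);

	/* 300 MB/s of burst traffic: 300 * 1000 = 300000 kBps */
	u32 peak = MBps_to_icc(300);

	icc_set_bw(path, avg, peak);
}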