 include/litmus/binheap.h  |   9 ++++++++-
 include/litmus/fdso.h     |   3 ++-
 include/litmus/rt_param.h |   7 +++++--
 litmus/fdso.c             |   1 +
 litmus/litmus.c           |   3 +++
 litmus/sched_gsn_edf.c    | 322 +++++++++++++++++++++++++++++++++++++++++++++-
 6 files changed, 340 insertions(+), 5 deletions(-)
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
index 412a4c7bd4bc..ce5539d72f4d 100644
--- a/include/litmus/binheap.h
+++ b/include/litmus/binheap.h
@@ -137,7 +137,7 @@ __binheap_decrease((orig_node), (handle))
 static inline void INIT_BINHEAP_NODE(struct binheap_node *n)
 {
 	n->data = NULL;
-	n->parent = NULL;
+	n->parent = BINHEAP_POISON;
 	n->left = NULL;
 	n->right = NULL;
 	n->ref = NULL;
@@ -160,6 +160,12 @@ static inline int binheap_empty(struct binheap_handle *handle)
 	return(handle->root == NULL);
 }
 
+/* Returns true (1) if binheap node is in a heap. */
+static inline int binheap_is_in_heap(struct binheap_node *node)
+{
+	return (node->parent != BINHEAP_POISON);
+}
+
 
 /* Update the node reference pointers. Same logic as Litmus binomial heap. */
 static inline void __update_ref(struct binheap_node *parent,
@@ -516,6 +522,7 @@ static inline void __binheap_delete(
 	__binheap_delete_root(handle, node_to_delete);
 
 	node_to_delete->data = temp_data;	/* restore node data pointer */
+	node_to_delete->parent = BINHEAP_POISON;	/* poison the node */
 }
 
 /**
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 4b4ae7796788..4e7a6bf06134 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -21,8 +21,9 @@ typedef enum {
 	SRP_SEM		= 1,
 
 	RSM_MUTEX	= 2,
+	IKGLP_SEM	= 3,
 
-	MAX_OBJ_TYPE	= 2
+	MAX_OBJ_TYPE	= 3
 } obj_type_t;
 
 struct inode_obj_id {
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 8947dd0c7eb2..3a961005e23b 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -135,8 +135,8 @@ struct rt_param {
 	 * could point to self if PI does not result in
 	 * an increased task priority.
 	 */
 	struct task_struct*	inh_task;
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	raw_spinlock_t		hp_blocked_tasks_lock;
 	struct binheap_handle	hp_blocked_tasks;
@@ -144,6 +144,9 @@ struct rt_param {
 
 	/* pointer to lock upon which is currently blocked */
 	struct litmus_lock*	blocked_lock;
+
+
+	struct task_struct*	doner;
 #endif
 
 
diff --git a/litmus/fdso.c b/litmus/fdso.c
index f192787b577d..2000ba8a92f5 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -24,6 +24,7 @@ static const struct fdso_ops* fdso_ops[] = {
 	&generic_lock_ops, /* FMLP_SEM */
 	&generic_lock_ops, /* SRP_SEM */
 	&generic_lock_ops, /* RSM_MUTEX */
+	&generic_lock_ops, /* IKGLP_SEM */
 };
 
 static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 9d4fbd2f8a65..3dcefe7a08d6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -316,6 +316,8 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	WARN_ON(p->rt_param.blocked_lock);
 	WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks));
+
+	WARN_ON(p->rt_param.doner);
 #endif
 
 	/* Cleanup everything else. */
@@ -491,6 +493,7 @@ void litmus_exec(void)
 
 	if (is_realtime(p)) {
 		WARN_ON(p->rt_param.inh_task);
+		WARN_ON(p->rt_param.doner);
 		if (tsk_rt(p)->ctrl_page) {
 			free_page((unsigned long) tsk_rt(p)->ctrl_page);
 			tsk_rt(p)->ctrl_page = NULL;
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 99c93ae65e06..10e14613655b 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -13,6 +13,10 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+#include <linux/uaccess.h>
+#endif
+
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
 #include <litmus/sched_plugin.h>
@@ -1457,6 +1461,318 @@ static struct litmus_lock* gsnedf_new_rsm_mutex(void)
 
 /* **** lock constructor **** */
 
+
+
+
+
+
+
+/* ******************** IKGLP ********************** */
+
+/* struct for semaphore with priority inheritance */
+struct fifo_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+	struct task_struct* hp_waiter;
+	int count; /* number of waiters + holder */
+};
+
+struct ikglp_donor_node
+{
+	struct task_struct *donor;
+	struct task_struct *donee;
+	struct binheap_node node;
+};
+
+static int ikglp_doner_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+{
+	struct ikglp_donor_node *d_a = (struct ikglp_donor_node*)binheap_entry(a, struct ikglp_donor_node, node);
+	struct ikglp_donor_node *d_b = (struct ikglp_donor_node*)binheap_entry(b, struct ikglp_donor_node, node);
+
+	return __edf_higher_prio(d_a->donor, BASE, d_b->donor, BASE);
+}
+
+
+struct ikglp_heap_node
+{
+	struct task_struct *task;
+	struct binheap_node node;
+};
+
+static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+{
+	struct ikglp_heap_node *d_a = (struct ikglp_heap_node*)binheap_entry(a, struct ikglp_heap_node, node);
+	struct ikglp_heap_node *d_b = (struct ikglp_heap_node*)binheap_entry(b, struct ikglp_heap_node, node);
+
+	return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE);
+}
+
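+/* Unlike the two comparators above, which order tasks by *base* priority
+ * (__edf_higher_prio with BASE), the min-heap comparator below calls
+ * edf_higher_prio() and therefore orders by *effective* (inheritance-aware)
+ * priority, matching the "min-heap, effective prio" use of fifos_heap below. */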
+static int ikglp_edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
+{
+	struct ikglp_heap_node *d_a = (struct ikglp_heap_node*)binheap_entry(a, struct ikglp_heap_node, node);
+	struct ikglp_heap_node *d_b = (struct ikglp_heap_node*)binheap_entry(b, struct ikglp_heap_node, node);
+
+	// note reversed order
+	return edf_higher_prio(d_b->task, d_a->task);
+}
+
+
+struct ikglp_semaphore
+{
+	struct litmus_lock litmus_lock;
+
+	raw_spinlock_t lock;
+
+	int nr_replicas;	// AKA k
+	int nr_queued;		// num requests in fifos + holders
+	int max_nr_queued;	// capacity of fifos + holders
+	int max_fifo_len;	// max len of a fifo queue
+
+	struct binheap_handle global_heap;	// max-heap, base prio
+	struct ikglp_heap_node *global_holder_nodes;	// holders need their nodes allocated dynamically
+
+	struct binheap_handle fifos_heap;	// min-heap, effective prio
+	struct ikglp_heap_node *fifos_holder_nodes;	// holders need their nodes allocated dynamically
+
+	struct fifo_queue *shortest_fifo_queue;	// pointer to shortest fifo queue
+
+	/* data structures for holding requests */
+	struct fifo_queue *fifo_queues;		// array nr_replicas in length
+	struct binheap_handle priority_queue;	// max-heap, base prio
+	struct binheap_handle doners;		// max-heap, base prio
+};
+
+static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct ikglp_semaphore, litmus_lock);
+}
+
+
+static inline int ikglp_get_idx(struct ikglp_semaphore *sem,
+				struct fifo_queue *queue)
+{
+	return (queue - &sem->fifo_queues[0]);
+}
+
+static inline struct fifo_queue* ikglp_get_queue(
+	struct ikglp_semaphore *sem,
+	struct task_struct *holder)
+{
+	int i;
+	for(i = 0; i < sem->nr_replicas; ++i)
+		if(sem->fifo_queues[i].owner == holder)
+			return(&sem->fifo_queues[i]);
+	return(NULL);
+}
+
+static struct task_struct* ikglp_find_hp_waiter(
+	struct fifo_queue *kqueue,
+	struct task_struct *skip)
+{
+	struct list_head *pos;
+	struct task_struct *queued, *found = NULL;
+
+	list_for_each(pos, &kqueue->wait.task_list) {
+		queued = (struct task_struct*) list_entry(pos, wait_queue_t, task_list)->private;
+
+		/* Compare task prios, find high prio task. */
+		if (queued != skip && edf_higher_prio(queued, found))
+			found = queued;
+	}
+	return found;
+}
+
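+/* Example of the circular scan below: with 4 replicas and
+ * search_start == &fifo_queues[2], the loop steps through queues 3, 0, 1
+ * and finally 2 itself, breaking out early if it records a queue with
+ * count == 0, since no queue can be shorter than an empty one. */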
+static inline struct fifo_queue* ikglp_find_shortest(
+	struct ikglp_semaphore *sem,
+	struct fifo_queue *search_start)
+{
+	// we start our search at search_start instead of at the beginning of the
+	// queue list to load-balance across all resources.
+	struct fifo_queue* step = search_start;
+	struct fifo_queue* shortest = sem->shortest_fifo_queue;
+
+	do {
+		step = (step+1 != &sem->fifo_queues[sem->nr_replicas]) ?
+			step+1 : &sem->fifo_queues[0];
+
+		if(step->count < shortest->count) {
+			shortest = step;
+			if(step->count == 0)
+				break; /* can't get any shorter */
+		}
+
+	} while(step != search_start);
+
+	return(shortest);
+}
+
+
+
+
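+/* NOTE: the lock() and unlock() handlers below are placeholder stubs;
+ * they currently return success immediately without enqueueing, blocking,
+ * or granting a replica. */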
+static int gsnedf_ikglp_lock(struct litmus_lock* l)
+{
+	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+	return 0;
+}
+
+static int gsnedf_ikglp_unlock(struct litmus_lock* l)
+{
+	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+	return 0;
+}
+
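+/* Called when the lock's object descriptor is closed (the .close op below):
+ * if the closing task still owns a replica, release it via unlock(). */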
+static int gsnedf_ikglp_close(struct litmus_lock* l)
+{
+	struct task_struct *t = current;
+	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+	unsigned long flags;
+
+	int owner = 0;
+	int i;
+
+	raw_spin_lock_irqsave(&sem->lock, flags);
+
+	for(i = 0; i < sem->nr_replicas; ++i) {
+		if(sem->fifo_queues[i].owner == t) {
+			owner = 1;
+			break;
+		}
+	}
+
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+	if (owner)
+		gsnedf_ikglp_unlock(l);
+
+	return 0;
+}
+
+static void gsnedf_ikglp_free(struct litmus_lock* l)
+{
+	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+
+	kfree(sem->fifo_queues);
+	kfree(sem->fifos_holder_nodes);
+	kfree(sem->global_holder_nodes);
+	kfree(sem);
+}
+
+static struct litmus_lock_ops gsnedf_ikglp_lock_ops = {
+	.lock = gsnedf_ikglp_lock,
+	.unlock = gsnedf_ikglp_unlock,
+
+	// ikglp can only be an outer-most lock.
+	.propagate_increase_inheritance = NULL,
+	.propagate_decrease_inheritance = NULL,
+
+	.close = gsnedf_ikglp_close,
+	.deallocate = gsnedf_ikglp_free,
+};
+
+static struct litmus_lock* gsnedf_new_ikglp(void* __user arg, int* ret_code)
+{
+	struct ikglp_semaphore* sem;
+	int nr_replicas = 0;
+	int i;
+
+	if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas)))
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+	if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas)))
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+	if(nr_replicas < 1)
+	{
+		*ret_code = -EINVAL;
+		return(NULL);
+	}
+
+	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
+	if(!sem)
+	{
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->global_holder_nodes = kmalloc(sizeof(struct ikglp_heap_node)*nr_replicas, GFP_KERNEL);
+	if(!sem->global_holder_nodes)
+	{
+		kfree(sem);
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->fifos_holder_nodes = kmalloc(sizeof(struct ikglp_heap_node)*nr_replicas, GFP_KERNEL);
+	if(!sem->fifos_holder_nodes)
+	{
+		kfree(sem->global_holder_nodes);
+		kfree(sem);
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL);
+	if(!sem->fifo_queues)
+	{
+		kfree(sem->fifos_holder_nodes);
+		kfree(sem->global_holder_nodes);
+		kfree(sem);
+		*ret_code = -ENOMEM;
+		return NULL;
+	}
+
+	sem->litmus_lock.ops = &gsnedf_ikglp_lock_ops;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+	{
+		__raw_spin_lock_init(&sem->lock, ((struct litmus_lock*)sem)->cheat_lockdep, &((struct litmus_lock*)sem)->key);
+	}
+#else
+	raw_spin_lock_init(&sem->lock);
+#endif
+
+	sem->nr_replicas = nr_replicas;
+	sem->nr_queued = 0;
+
+	sem->max_fifo_len = (num_online_cpus()/nr_replicas) + ((num_online_cpus()%nr_replicas) != 0) - 1; // -1 to exclude holder
+	sem->max_nr_queued = (sem->max_fifo_len * nr_replicas) + nr_replicas; // capacity of fifos + holders
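+	/* Example: with 8 online CPUs and 3 replicas, max_fifo_len =
+	 * 8/3 + (8%3 != 0) - 1 = 2 waiters per FIFO (i.e. ceil(8/3) minus
+	 * the holder), and max_nr_queued = 2*3 + 3 = 9 requests in total. */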
+
+	for(i = 0; i < nr_replicas; ++i)
+	{
+		sem->fifo_queues[i].owner = NULL;
+		sem->fifo_queues[i].hp_waiter = NULL;
+		init_waitqueue_head(&sem->fifo_queues[i].wait);
+		sem->fifo_queues[i].count = 0;
+	}
+
+	sem->shortest_fifo_queue = &sem->fifo_queues[0];
+
+
+	// init heaps
+	INIT_BINHEAP_HANDLE(&sem->global_heap, ikglp_edf_max_heap_base_priority_order);
+	INIT_BINHEAP_HANDLE(&sem->fifos_heap, ikglp_edf_min_heap_order);
+	INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order);
+	INIT_BINHEAP_HANDLE(&sem->doners, ikglp_doner_edf_max_heap_base_priority_order);
+
+	// init heap nodes used by holders
+	for(i = 0; i < nr_replicas; ++i) {
+		INIT_BINHEAP_NODE(&(sem->global_holder_nodes[i].node));
+		INIT_BINHEAP_NODE(&(sem->fifos_holder_nodes[i].node));
+	}
+
+	*ret_code = 0;
+	return &sem->litmus_lock;
+}
+
+
+
+
+
 #endif
 
 
@@ -1678,7 +1994,7 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 
 
 static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
-				 void* __user unused)
+				 void* __user args)
 {
 	int err;
 
@@ -1694,6 +2010,10 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 	case RSM_MUTEX:
 		*lock = gsnedf_new_rsm_mutex();
 		break;
+
+	case IKGLP_SEM:
+		*lock = gsnedf_new_ikglp(args, &err);
+		break;
 #endif
 
 	default: