author    Dave Airlie <airlied@redhat.com>  2009-08-03 21:43:41 -0400
committer Dave Airlie <airlied@redhat.com>  2009-09-07 21:45:15 -0400
commit    ec2a4c3fdc8e82fe82a25d800e85c1ea06b74372 (patch)
tree      b49f3ebe3e356fa8d17f15e9a5421851cb90024b
parent    f1938cd6e900a85de64184e46d841efc9efd3484 (diff)
drm/i915: get the bridge device once.
The driver currently looks up the bridge device in a number of places. Upcoming VGA arbitration code paths also need the bridge device, but they need it under a lock, and the PCI lookup can allocate memory. So clean this code up beforehand and get the bridge device once for the driver's lifetime.

Signed-off-by: Dave Airlie <airlied@redhat.com>
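For illustration only (not code from this commit), here is a minimal sketch of the "get once at load, put once at unload" pattern the patch moves to: pci_get_bus_and_slot() takes a reference on the returned pci_dev and can allocate, so the driver takes the reference a single time at load and drops it with pci_dev_put() at unload. The example_priv structure and example_* function names are hypothetical.

        /* Hypothetical sketch of the "get once, put once" pattern. */
        #include <linux/pci.h>
        #include <linux/errno.h>

        struct example_priv {
                struct pci_dev *bridge_dev; /* reference held for the driver's lifetime */
        };

        /*
         * Load path: look up the host bridge (device 0, function 0 on bus 0)
         * a single time.  pci_get_bus_and_slot() takes a reference and may
         * allocate memory, so it must not run in the locked arbitration paths.
         */
        static int example_get_bridge_dev(struct example_priv *priv)
        {
                priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
                if (!priv->bridge_dev)
                        return -ENODEV;
                return 0;
        }

        /* Unload path: drop the reference taken at load time. */
        static void example_put_bridge_dev(struct example_priv *priv)
        {
                pci_dev_put(priv->bridge_dev);
        }

Every later user then dereferences the cached pointer directly, which is what the diff below does with dev_priv->bridge_dev.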
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c        | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 65
3 files changed, 44 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 91ef4c150193..9909505d070a 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -898,6 +898,18 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+	if (!dev_priv->bridge_dev) {
+		DRM_ERROR("bridge device not found\n");
+		return -1;
+	}
+	return 0;
+}
+
 /**
  * i915_probe_agp - get AGP bootup configuration
  * @pdev: PCI device
@@ -911,20 +923,13 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 			  uint32_t *preallocated_size)
 {
-	struct pci_dev *bridge_dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u16 tmp = 0;
 	unsigned long overhead;
 	unsigned long stolen;
 
-	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-	if (!bridge_dev) {
-		DRM_ERROR("bridge device not found\n");
-		return -1;
-	}
-
 	/* Get the fb aperture size and "stolen" memory amount. */
-	pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
-	pci_dev_put(bridge_dev);
+	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
 
 	*aperture_size = 1024 * 1024;
 	*preallocated_size = 1024 * 1024;
@@ -1176,11 +1181,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
+	if (i915_get_bridge_dev(dev)) {
+		ret = -EIO;
+		goto free_priv;
+	}
+
 	dev_priv->regs = ioremap(base, size);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
 		ret = -EIO;
-		goto free_priv;
+		goto put_bridge;
 	}
 
 	dev_priv->mm.gtt_mapping =
@@ -1292,6 +1302,8 @@ out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
 	iounmap(dev_priv->regs);
+put_bridge:
+	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
 	kfree(dev_priv);
 	return ret;
@@ -1335,6 +1347,7 @@ int i915_driver_unload(struct drm_device *dev)
 		i915_gem_lastclose(dev);
 	}
 
+	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2d5bce643e6a..77ed060b4292 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -155,6 +155,7 @@ typedef struct drm_i915_private {
 
 	void __iomem *regs;
 
+	struct pci_dev *bridge_dev;
 	drm_i915_ring_buffer_t ring;
 
 	drm_dma_handle_t *status_page_dmah;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e774a4a1a503..200e398453ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -94,23 +94,15 @@
 static int
 intel_alloc_mchbar_resource(struct drm_device *dev)
 {
-	struct pci_dev *bridge_dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
 	int ret = 0;
 
-	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-	if (!bridge_dev) {
-		DRM_DEBUG("no bridge dev?!\n");
-		ret = -ENODEV;
-		goto out;
-	}
-
 	if (IS_I965G(dev))
-		pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
-	pci_read_config_dword(bridge_dev, reg, &temp_lo);
+		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
 	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
 
 	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
@@ -118,30 +110,28 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 	if (mchbar_addr &&
 	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
 		ret = 0;
-		goto out_put;
+		goto out;
 	}
 #endif
 
 	/* Get some space for it */
-	ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
 				     MCHBAR_SIZE, MCHBAR_SIZE,
 				     PCIBIOS_MIN_MEM,
 				     0, pcibios_align_resource,
-				     bridge_dev);
+				     dev_priv->bridge_dev);
 	if (ret) {
 		DRM_DEBUG("failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
-		goto out_put;
+		goto out;
 	}
 
 	if (IS_I965G(dev))
-		pci_write_config_dword(bridge_dev, reg + 4,
+		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
 				       upper_32_bits(dev_priv->mch_res.start));
 
-	pci_write_config_dword(bridge_dev, reg,
+	pci_write_config_dword(dev_priv->bridge_dev, reg,
 			       lower_32_bits(dev_priv->mch_res.start));
-out_put:
-	pci_dev_put(bridge_dev);
 out:
 	return ret;
 }
@@ -150,44 +140,36 @@ out:
 static bool
 intel_setup_mchbar(struct drm_device *dev)
 {
-	struct pci_dev *bridge_dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 	bool need_disable = false, enabled;
 
-	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-	if (!bridge_dev) {
-		DRM_DEBUG("no bridge dev?!\n");
-		goto out;
-	}
-
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
+		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
 		enabled = !!(temp & DEVEN_MCHBAR_EN);
 	} else {
-		pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
 		enabled = temp & 1;
 	}
 
 	/* If it's already enabled, don't have to do anything */
 	if (enabled)
-		goto out_put;
+		goto out;
 
 	if (intel_alloc_mchbar_resource(dev))
-		goto out_put;
+		goto out;
 
 	need_disable = true;
 
 	/* Space is allocated or reserved, so enable it. */
 	if (IS_I915G(dev) || IS_I915GM(dev)) {
-		pci_write_config_dword(bridge_dev, DEVEN_REG,
+		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
 				       temp | DEVEN_MCHBAR_EN);
 	} else {
-		pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
-		pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
+		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
 	}
-out_put:
-	pci_dev_put(bridge_dev);
 out:
 	return need_disable;
 }
@@ -196,25 +178,18 @@ static void
 intel_teardown_mchbar(struct drm_device *dev, bool disable)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct pci_dev *bridge_dev;
 	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 
-	bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
-	if (!bridge_dev) {
-		DRM_DEBUG("no bridge dev?!\n");
-		return;
-	}
-
 	if (disable) {
 		if (IS_I915G(dev) || IS_I915GM(dev)) {
-			pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
+			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
 			temp &= ~DEVEN_MCHBAR_EN;
-			pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
+			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
 		} else {
-			pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
+			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
 			temp &= ~1;
-			pci_write_config_dword(bridge_dev, mchbar_reg, temp);
+			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
 		}
 	}
 