Diffstat (limited to 'drivers/char/drm/radeon_state.c')
 -rw-r--r--  drivers/char/drm/radeon_state.c | 184
 1 file changed, 72 insertions, 112 deletions
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 9e816c63a8a3..e9d8ec3a0994 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1,5 +1,5 @@
-/* radeon_state.c -- State support for Radeon -*- linux-c -*-
- *
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
  * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
  * All Rights Reserved.
  *
@@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
 	case RADEON_EMIT_PP_MISC:
 		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-						  &data[(RADEON_RB3D_DEPTHOFFSET
-							 -
-							 RADEON_PP_MISC) /
-							4])) {
+		    &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
 			DRM_ERROR("Invalid depth buffer offset\n");
 			return DRM_ERR(EINVAL);
 		}
@@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
 	case RADEON_EMIT_PP_CNTL:
 		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-						  &data[(RADEON_RB3D_COLOROFFSET
-							 -
-							 RADEON_PP_CNTL) /
-							4])) {
+		    &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
 			DRM_ERROR("Invalid colour buffer offset\n");
 			return DRM_ERR(EINVAL);
 		}
@@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 	case RADEON_EMIT_PP_TXFILTER_1:
 	case RADEON_EMIT_PP_TXFILTER_2:
 		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-						  &data[(RADEON_PP_TXOFFSET_0
-							 -
-							 RADEON_PP_TXFILTER_0) /
-							4])) {
+		    &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
 			DRM_ERROR("Invalid R100 texture offset\n");
 			return DRM_ERR(EINVAL);
 		}
@@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
 			int i;
 			for (i = 0; i < 5; i++) {
-				if (radeon_check_and_fixup_offset
-				    (dev_priv, filp_priv, &data[i])) {
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  filp_priv,
+								  &data[i])) {
 					DRM_ERROR
 					    ("Invalid R200 cubic texture offset\n");
 					return DRM_ERR(EINVAL);
@@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
 static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
 						     dev_priv,
-						     drm_file_t * filp_priv,
-						     drm_radeon_kcmd_buffer_t *cmdbuf,
+						     drm_file_t *filp_priv,
+						     drm_radeon_kcmd_buffer_t *
+						     cmdbuf,
 						     unsigned int *cmdsz)
 {
 	u32 *cmd = (u32 *) cmdbuf->buf;
@@ -555,7 +548,8 @@ static struct {
 	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
 	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
 	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
-	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
 	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
 	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
 	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@@ -569,7 +563,7 @@ static struct {
 	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
 	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
 	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
-	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"},	/* 62 */
+	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
 	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
 	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
 	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
@@ -592,7 +586,7 @@ static struct {
 	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
 	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
 	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
-	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},	/* 85 */
+	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
 	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
 	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
 	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
@@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
 	 * rendering a quad into just those buffers. Thus, we have to
 	 * make sure the 3D engine is configured correctly.
 	 */
-	if ((dev_priv->microcode_version == UCODE_R200) &&
-	    (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+	else if ((dev_priv->microcode_version == UCODE_R200) &&
+		 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
 
 		int tempPP_CNTL;
 		int tempRE_CNTL;
@@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 	    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
 	dwords = size / 4;
 
+#define RADEON_COPY_MT(_buf, _data, _width) \
+	do { \
+		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+			return DRM_ERR(EFAULT); \
+		} \
+	} while(0)
+
 	if (microtile) {
 		/* texture micro tiling in use, minimum texture width is thus 16 bytes.
 		   however, we cannot use blitter directly for texture width < 64 bytes,
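The RADEON_COPY_MT macro introduced above collapses each copy-and-bail sequence into a single statement. Two details are worth noting: the do { ... } while(0) wrapper lets the macro expand safely anywhere one statement is expected, and the embedded return exits the enclosing function, so the macro is only usable inside a function that returns a DRM error code. A minimal standalone sketch of the same pattern, with hypothetical names (copy_bytes, COPY_OR_FAIL) standing in for the DRM helpers:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for DRM_COPY_FROM_USER / DRM_ERR(EFAULT). */
#define COPY_OR_FAIL(_dst, _src, _n) \
	do { \
		if (copy_bytes(_dst, _src, (_n))) { \
			fprintf(stderr, "EFAULT, %d bytes\n", (int)(_n)); \
			return -1;	/* bails out of the calling function */ \
		} \
	} while (0)

static int copy_bytes(void *dst, const void *src, size_t n)
{
	if (!src)
		return 1;	/* simulate a faulting user pointer */
	memcpy(dst, src, n);
	return 0;
}

static int upload(char *dst, const char *src)
{
	COPY_OR_FAIL(dst, src, 16);	/* expands to exactly one statement */
	COPY_OR_FAIL(dst + 16, src + 16, 16);
	return 0;
}

int main(void)
{
	char buf[32];
	return upload(buf, "0123456789abcdef0123456789abcdef") ? 1 : 0;
}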
@@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 		   from user space. */
 		if (tex->height == 1) {
 			if (tex_width >= 64 || tex_width <= 16) {
-				if (DRM_COPY_FROM_USER(buffer, data,
-						       tex_width *
-						       sizeof(u32))) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data,
+					       tex_width * sizeof(u32));
 			} else if (tex_width == 32) {
-				if (DRM_COPY_FROM_USER
-				    (buffer, data, 16)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
-				if (DRM_COPY_FROM_USER
-				    (buffer + 8, data + 16, 16)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data, 16);
+				RADEON_COPY_MT(buffer + 8,
+					       data + 16, 16);
 			}
 		} else if (tex_width >= 64 || tex_width == 16) {
-			if (DRM_COPY_FROM_USER(buffer, data,
-					       dwords * sizeof(u32))) {
-				DRM_ERROR("EFAULT on data, %d dwords\n",
-					  dwords);
-				return DRM_ERR(EFAULT);
-			}
+			RADEON_COPY_MT(buffer, data,
+				       dwords * sizeof(u32));
 		} else if (tex_width < 16) {
 			for (i = 0; i < tex->height; i++) {
-				if (DRM_COPY_FROM_USER
-				    (buffer, data, tex_width)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data, tex_width);
 				buffer += 4;
 				data += tex_width;
 			}
@@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 			/* TODO: make sure this works when not fitting in one buffer
 			   (i.e. 32bytes x 2048...) */
 			for (i = 0; i < tex->height; i += 2) {
-				if (DRM_COPY_FROM_USER
-				    (buffer, data, 16)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data, 16);
 				data += 16;
-				if (DRM_COPY_FROM_USER
-				    (buffer + 8, data, 16)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer + 8, data, 16);
 				data += 16;
-				if (DRM_COPY_FROM_USER
-				    (buffer + 4, data, 16)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer + 4, data, 16);
 				data += 16;
-				if (DRM_COPY_FROM_USER
-				    (buffer + 12, data, 16)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer + 12, data, 16);
 				data += 16;
 				buffer += 16;
 			}
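In the two-rows-per-iteration loop above, buffer is a u32 pointer, so buffer + 0, + 8, + 4 and + 12 land at byte offsets 0, 32, 16 and 48: each 16-byte half-row is stored next to the half-row directly below it, which is the microtiled layout the hardware expects. A standalone illustration of that reordering on plain memory (illustration only, not driver code):

#include <stdint.h>
#include <string.h>
#include <assert.h>

/* Reorder two linear 32-byte rows into 16-byte x 2-row microtiles:
 * dst[ 0..15] = row0 left half    dst[16..31] = row1 left half
 * dst[32..47] = row0 right half   dst[48..63] = row1 right half
 */
static void microtile_two_rows(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst +  0, src +  0, 16);	/* row 0, left half  */
	memcpy(dst + 32, src + 16, 16);	/* row 0, right half */
	memcpy(dst + 16, src + 32, 16);	/* row 1, left half  */
	memcpy(dst + 48, src + 48, 16);	/* row 1, right half */
}

int main(void)
{
	uint8_t src[64], dst[64];
	for (int i = 0; i < 64; i++)
		src[i] = (uint8_t)i;
	microtile_two_rows(dst, src);
	assert(dst[16] == 32);	/* row 1 sits right after row 0's left half */
	return 0;
}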
@@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
 			/* Texture image width is larger than the minimum, so we
 			 * can upload it directly.
 			 */
-			if (DRM_COPY_FROM_USER(buffer, data,
-					       dwords * sizeof(u32))) {
-				DRM_ERROR("EFAULT on data, %d dwords\n",
-					  dwords);
-				return DRM_ERR(EFAULT);
-			}
+			RADEON_COPY_MT(buffer, data,
+				       dwords * sizeof(u32));
 		} else {
 			/* Texture image width is less than the minimum, so we
 			 * need to pad out each image scanline to the minimum
 			 * width.
 			 */
 			for (i = 0; i < tex->height; i++) {
-				if (DRM_COPY_FROM_USER
-				    (buffer, data, tex_width)) {
-					DRM_ERROR
-					    ("EFAULT on pad, %d bytes\n",
-					     tex_width);
-					return DRM_ERR(EFAULT);
-				}
+				RADEON_COPY_MT(buffer, data, tex_width);
 				buffer += 8;
 				data += tex_width;
 			}
 		}
 	}
 
+#undef RADEON_COPY_MT
 	buf->filp = filp;
 	buf->used = size;
 	offset = dev_priv->gart_buffers_offset + buf->offset;
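Scoping note: the #undef directly after the last use keeps RADEON_COPY_MT private to radeon_cp_dispatch_texture(), so a macro that can return from its caller cannot accidentally be picked up by other functions later in the file.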
@@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
 }
 
 static void radeon_apply_surface_regs(int surf_index,
-				      drm_radeon_private_t * dev_priv)
+				      drm_radeon_private_t *dev_priv)
 {
 	if (!dev_priv->mmio)
 		return;
@@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
  * freed, we suddenly need two surfaces to store A and C, which might
  * not always be available.
  */
-static int alloc_surface(drm_radeon_surface_alloc_t * new,
-			 drm_radeon_private_t * dev_priv, DRMFILE filp)
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+			 drm_radeon_private_t *dev_priv, DRMFILE filp)
 {
 	struct radeon_virt_surface *s;
 	int i;
@@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
 
 	LOCK_TEST_WITH_RETURN(dev, filp);
 
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		return DRM_ERR(EINVAL);
+	}
+
 	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
 
 	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
 	return 0;
 }
 
-static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
 					  drm_radeon_cmd_header_t header,
-					  drm_radeon_kcmd_buffer_t * cmdbuf)
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	int sz = header.scalars.count;
 	int start = header.scalars.offset;
@@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
 
 /* God this is ugly
  */
-static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
 					   drm_radeon_cmd_header_t header,
-					   drm_radeon_kcmd_buffer_t * cmdbuf)
+					   drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	int sz = header.scalars.count;
 	int start = ((unsigned int)header.scalars.offset) + 0x100;
@@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
 	return 0;
 }
 
-static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
 					  drm_radeon_cmd_header_t header,
-					  drm_radeon_kcmd_buffer_t * cmdbuf)
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
 {
 	int sz = header.vectors.count;
 	int start = header.vectors.offset;
@@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
 	return 0;
 }
 
-static int radeon_emit_packet3_cliprect(drm_device_t * dev,
-					drm_file_t * filp_priv,
+static int radeon_emit_packet3_cliprect(drm_device_t *dev,
+					drm_file_t *filp_priv,
 					drm_radeon_kcmd_buffer_t *cmdbuf,
 					int orig_nbox)
 {
@@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
 	kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
 	if (kbuf == NULL)
 		return DRM_ERR(ENOMEM);
-	if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) {
+	if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
+			       cmdbuf.bufsz)) {
 		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
 		return DRM_ERR(EFAULT);
 	}
@@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
 		value = dev_priv->gart_vm_start;
 		break;
 	case RADEON_PARAM_REGISTER_HANDLE:
-		value = dev_priv->mmio_offset;
+		value = dev_priv->mmio->offset;
 		break;
 	case RADEON_PARAM_STATUS_HANDLE:
 		value = dev_priv->ring_rptr_offset;
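The RADEON_PARAM_REGISTER_HANDLE case now reads the handle through the mapping object instead of a scalar cached in the private struct; this matches dev_priv->mmio having become a pointer to the DRM map descriptor. A rough sketch of the shape of that change (struct names here are illustrative, not the real DRM types):

#include <stdio.h>

/* Illustrative stand-ins; the real types are the DRM map and
 * drm_radeon_private_t structures of this era. */
struct map_sketch {
	unsigned long offset;		/* bus address of the register aperture */
};

struct priv_sketch {
	struct map_sketch *mmio;	/* was: unsigned long mmio_offset */
};

static unsigned long register_handle(const struct priv_sketch *p)
{
	return p->mmio->offset;		/* what the hunk above now returns */
}

int main(void)
{
	struct map_sketch map = { .offset = 0xe0000000UL };
	struct priv_sketch priv = { .mmio = &map };
	printf("handle = %#lx\n", register_handle(&priv));
	return 0;
}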
@@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
 	case RADEON_PARAM_GART_TEX_HANDLE:
 		value = dev_priv->gart_textures_offset;
 		break;
+
+	case RADEON_PARAM_CARD_TYPE:
+		if (dev_priv->flags & CHIP_IS_PCIE)
+			value = RADEON_CARD_PCIE;
+		else if (dev_priv->flags & CHIP_IS_AGP)
+			value = RADEON_CARD_AGP;
+		else
+			value = RADEON_CARD_PCI;
+		break;
 	default:
 		return DRM_ERR(EINVAL);
 	}
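On the userspace side the new RADEON_PARAM_CARD_TYPE is queried through the existing GETPARAM ioctl; older kernels fall through to the default case above and return EINVAL, which a client can treat as "unknown bus type" and handle with a safe fallback. A sketch assuming the libdrm helpers of this period (drmCommandWriteRead, drm_radeon_getparam_t), with error handling trimmed:

#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_get_card_type(int fd)
{
	drm_radeon_getparam_t gp;
	int card_type = -1;

	gp.param = RADEON_PARAM_CARD_TYPE;
	gp.value = &card_type;	/* the kernel writes the result here */
	if (drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp)))
		return -1;	/* e.g. EINVAL from a pre-PCIE kernel */
	return card_type;	/* RADEON_CARD_PCI / _AGP / _PCIE */
}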
@@ -3066,6 +3023,7 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
 /* When a client dies:
  *   - Check for and clean up flipped page state
  *   - Free any alloced GART memory.
+ *   - Free any alloced radeon surfaces.
  *
  * DRM infrastructure takes care of reclaiming dma buffers.
  */
@@ -3092,6 +3050,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	struct drm_radeon_driver_file_fields *radeon_priv;
 
+	DRM_DEBUG("\n");
 	radeon_priv =
 	    (struct drm_radeon_driver_file_fields *)
 	    drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@@ -3100,6 +3059,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
 		return -ENOMEM;
 
 	filp_priv->driver_priv = radeon_priv;
+
 	if (dev_priv)
 		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
 	else