Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-patches' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm: Fix further issues in drivers/char/drm/via_irq.c
drivers/char/drm/drm_memory.c: possible cleanups
drm: de-inline a few large inlines in DRM code
drm: remove master setting from add/remove context
drm: drm_pci needs dma-mapping.h
[PATCH] drm: Fix issue reported by Coverity in drivers/char/drm/via_irq.c

+152 -133
+2 -2
drivers/char/drm/drmP.h
··· 815 815 extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); 816 816 extern void *drm_ioremap(unsigned long offset, unsigned long size, 817 817 drm_device_t * dev); 818 - extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size, 819 - drm_device_t * dev); 820 818 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev); 821 819 822 820 extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type); ··· 1020 1022 map->handle = drm_ioremap(map->offset, map->size, dev); 1021 1023 } 1022 1024 1025 + #if 0 1023 1026 static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, 1024 1027 struct drm_device *dev) 1025 1028 { 1026 1029 map->handle = drm_ioremap_nocache(map->offset, map->size, dev); 1027 1030 } 1031 + #endif /* 0 */ 1028 1032 1029 1033 static __inline__ void drm_core_ioremapfree(struct drm_map *map, 1030 1034 struct drm_device *dev)
+2 -2
drivers/char/drm/drm_drv.c
··· 75 75 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 76 76 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, 77 77 78 - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 79 - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 78 + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY}, 79 + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY}, 80 80 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 81 81 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, 82 82 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+134
drivers/char/drm/drm_memory.c
··· 80 80 } 81 81 82 82 #if __OS_HAS_AGP 83 + /* 84 + * Find the drm_map that covers the range [offset, offset+size). 85 + */ 86 + static drm_map_t *drm_lookup_map(unsigned long offset, 87 + unsigned long size, drm_device_t * dev) 88 + { 89 + struct list_head *list; 90 + drm_map_list_t *r_list; 91 + drm_map_t *map; 92 + 93 + list_for_each(list, &dev->maplist->head) { 94 + r_list = (drm_map_list_t *) list; 95 + map = r_list->map; 96 + if (!map) 97 + continue; 98 + if (map->offset <= offset 99 + && (offset + size) <= (map->offset + map->size)) 100 + return map; 101 + } 102 + return NULL; 103 + } 104 + 105 + static void *agp_remap(unsigned long offset, unsigned long size, 106 + drm_device_t * dev) 107 + { 108 + unsigned long *phys_addr_map, i, num_pages = 109 + PAGE_ALIGN(size) / PAGE_SIZE; 110 + struct drm_agp_mem *agpmem; 111 + struct page **page_map; 112 + void *addr; 113 + 114 + size = PAGE_ALIGN(size); 115 + 116 + #ifdef __alpha__ 117 + offset -= dev->hose->mem_space->start; 118 + #endif 119 + 120 + for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) 121 + if (agpmem->bound <= offset 122 + && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= 123 + (offset + size)) 124 + break; 125 + if (!agpmem) 126 + return NULL; 127 + 128 + /* 129 + * OK, we're mapping AGP space on a chipset/platform on which memory accesses by 130 + * the CPU do not get remapped by the GART. We fix this by using the kernel's 131 + * page-table instead (that's probably faster anyhow...). 132 + */ 133 + /* note: use vmalloc() because num_pages could be large... 
*/ 134 + page_map = vmalloc(num_pages * sizeof(struct page *)); 135 + if (!page_map) 136 + return NULL; 137 + 138 + phys_addr_map = 139 + agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; 140 + for (i = 0; i < num_pages; ++i) 141 + page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); 142 + addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); 143 + vfree(page_map); 144 + 145 + return addr; 146 + } 147 + 83 148 /** Wrapper around agp_allocate_memory() */ 84 149 DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type) 85 150 { ··· 168 103 { 169 104 return drm_agp_unbind_memory(handle); 170 105 } 106 + 107 + #else /* __OS_HAS_AGP */ 108 + 109 + static inline drm_map_t *drm_lookup_map(unsigned long offset, 110 + unsigned long size, drm_device_t * dev) 111 + { 112 + return NULL; 113 + } 114 + 115 + static inline void *agp_remap(unsigned long offset, unsigned long size, 116 + drm_device_t * dev) 117 + { 118 + return NULL; 119 + } 120 + 171 121 #endif /* agp */ 122 + 123 + void *drm_ioremap(unsigned long offset, unsigned long size, 124 + drm_device_t * dev) 125 + { 126 + if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { 127 + drm_map_t *map = drm_lookup_map(offset, size, dev); 128 + 129 + if (map && map->type == _DRM_AGP) 130 + return agp_remap(offset, size, dev); 131 + } 132 + return ioremap(offset, size); 133 + } 134 + EXPORT_SYMBOL(drm_ioremap); 135 + 136 + #if 0 137 + void *drm_ioremap_nocache(unsigned long offset, 138 + unsigned long size, drm_device_t * dev) 139 + { 140 + if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { 141 + drm_map_t *map = drm_lookup_map(offset, size, dev); 142 + 143 + if (map && map->type == _DRM_AGP) 144 + return agp_remap(offset, size, dev); 145 + } 146 + return ioremap_nocache(offset, size); 147 + } 148 + #endif /* 0 */ 149 + 150 + void drm_ioremapfree(void *pt, unsigned long size, 151 + drm_device_t * dev) 152 + { 153 + /* 154 + * This is a bit ugly. 
It would be much cleaner if the DRM API would use separate 155 + * routines for handling mappings in the AGP space. Hopefully this can be done in 156 + * a future revision of the interface... 157 + */ 158 + if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture 159 + && ((unsigned long)pt >= VMALLOC_START 160 + && (unsigned long)pt < VMALLOC_END)) { 161 + unsigned long offset; 162 + drm_map_t *map; 163 + 164 + offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK); 165 + map = drm_lookup_map(offset, size, dev); 166 + if (map && map->type == _DRM_AGP) { 167 + vunmap(pt); 168 + return; 169 + } 170 + } 171 + 172 + iounmap(pt); 173 + } 174 + EXPORT_SYMBOL(drm_ioremapfree); 175 + 172 176 #endif /* debug_memory */
+4 -124
drivers/char/drm/drm_memory.h
··· 57 57 # endif 58 58 #endif 59 59 60 - /* 61 - * Find the drm_map that covers the range [offset, offset+size). 62 - */ 63 - static inline drm_map_t *drm_lookup_map(unsigned long offset, 64 - unsigned long size, drm_device_t * dev) 65 - { 66 - struct list_head *list; 67 - drm_map_list_t *r_list; 68 - drm_map_t *map; 69 - 70 - list_for_each(list, &dev->maplist->head) { 71 - r_list = (drm_map_list_t *) list; 72 - map = r_list->map; 73 - if (!map) 74 - continue; 75 - if (map->offset <= offset 76 - && (offset + size) <= (map->offset + map->size)) 77 - return map; 78 - } 79 - return NULL; 80 - } 81 - 82 - static inline void *agp_remap(unsigned long offset, unsigned long size, 83 - drm_device_t * dev) 84 - { 85 - unsigned long *phys_addr_map, i, num_pages = 86 - PAGE_ALIGN(size) / PAGE_SIZE; 87 - struct drm_agp_mem *agpmem; 88 - struct page **page_map; 89 - void *addr; 90 - 91 - size = PAGE_ALIGN(size); 92 - 93 - #ifdef __alpha__ 94 - offset -= dev->hose->mem_space->start; 95 - #endif 96 - 97 - for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) 98 - if (agpmem->bound <= offset 99 - && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= 100 - (offset + size)) 101 - break; 102 - if (!agpmem) 103 - return NULL; 104 - 105 - /* 106 - * OK, we're mapping AGP space on a chipset/platform on which memory accesses by 107 - * the CPU do not get remapped by the GART. We fix this by using the kernel's 108 - * page-table instead (that's probably faster anyhow...). 109 - */ 110 - /* note: use vmalloc() because num_pages could be large... 
*/ 111 - page_map = vmalloc(num_pages * sizeof(struct page *)); 112 - if (!page_map) 113 - return NULL; 114 - 115 - phys_addr_map = 116 - agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; 117 - for (i = 0; i < num_pages; ++i) 118 - page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); 119 - addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); 120 - vfree(page_map); 121 - 122 - return addr; 123 - } 124 - 125 60 static inline unsigned long drm_follow_page(void *vaddr) 126 61 { 127 62 pgd_t *pgd = pgd_offset_k((unsigned long)vaddr); ··· 68 133 69 134 #else /* __OS_HAS_AGP */ 70 135 71 - static inline drm_map_t *drm_lookup_map(unsigned long offset, 72 - unsigned long size, drm_device_t * dev) 73 - { 74 - return NULL; 75 - } 76 - 77 - static inline void *agp_remap(unsigned long offset, unsigned long size, 78 - drm_device_t * dev) 79 - { 80 - return NULL; 81 - } 82 - 83 136 static inline unsigned long drm_follow_page(void *vaddr) 84 137 { 85 138 return 0; ··· 75 152 76 153 #endif 77 154 78 - static inline void *drm_ioremap(unsigned long offset, unsigned long size, 79 - drm_device_t * dev) 80 - { 81 - if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { 82 - drm_map_t *map = drm_lookup_map(offset, size, dev); 155 + void *drm_ioremap(unsigned long offset, unsigned long size, 156 + drm_device_t * dev); 83 157 84 - if (map && map->type == _DRM_AGP) 85 - return agp_remap(offset, size, dev); 86 - } 87 - return ioremap(offset, size); 88 - } 89 - 90 - static inline void *drm_ioremap_nocache(unsigned long offset, 91 - unsigned long size, drm_device_t * dev) 92 - { 93 - if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { 94 - drm_map_t *map = drm_lookup_map(offset, size, dev); 95 - 96 - if (map && map->type == _DRM_AGP) 97 - return agp_remap(offset, size, dev); 98 - } 99 - return ioremap_nocache(offset, size); 100 - } 101 - 102 - static inline void drm_ioremapfree(void *pt, unsigned long size, 103 - drm_device_t * dev) 
104 - { 105 - /* 106 - * This is a bit ugly. It would be much cleaner if the DRM API would use separate 107 - * routines for handling mappings in the AGP space. Hopefully this can be done in 108 - * a future revision of the interface... 109 - */ 110 - if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture 111 - && ((unsigned long)pt >= VMALLOC_START 112 - && (unsigned long)pt < VMALLOC_END)) { 113 - unsigned long offset; 114 - drm_map_t *map; 115 - 116 - offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK); 117 - map = drm_lookup_map(offset, size, dev); 118 - if (map && map->type == _DRM_AGP) { 119 - vunmap(pt); 120 - return; 121 - } 122 - } 123 - 124 - iounmap(pt); 125 - } 158 + void drm_ioremapfree(void *pt, unsigned long size, 159 + drm_device_t * dev);
+2
drivers/char/drm/drm_memory_debug.h
··· 229 229 return pt; 230 230 } 231 231 232 + #if 0 232 233 void *drm_ioremap_nocache (unsigned long offset, unsigned long size, 233 234 drm_device_t * dev) { 234 235 void *pt; ··· 252 251 spin_unlock(&drm_mem_lock); 253 252 return pt; 254 253 } 254 + #endif /* 0 */ 255 255 256 256 void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) { 257 257 int alloc_count;
+1
drivers/char/drm/drm_pci.c
··· 37 37 */ 38 38 39 39 #include <linux/pci.h> 40 + #include <linux/dma-mapping.h> 40 41 #include "drmP.h" 41 42 42 43 /**********************************************************************/
+7 -5
drivers/char/drm/via_irq.c
··· 196 196 { 197 197 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 198 198 unsigned int cur_irq_sequence; 199 - drm_via_irq_t *cur_irq = dev_priv->via_irqs; 199 + drm_via_irq_t *cur_irq; 200 200 int ret = 0; 201 - maskarray_t *masks = dev_priv->irq_masks; 201 + maskarray_t *masks; 202 202 int real_irq; 203 203 204 204 DRM_DEBUG("%s\n", __FUNCTION__); ··· 221 221 __FUNCTION__, irq); 222 222 return DRM_ERR(EINVAL); 223 223 } 224 - 225 - cur_irq += real_irq; 224 + 225 + masks = dev_priv->irq_masks; 226 + cur_irq = dev_priv->via_irqs + real_irq; 226 227 227 228 if (masks[real_irq][2] && !force_sequence) { 228 229 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, ··· 248 247 { 249 248 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 250 249 u32 status; 251 - drm_via_irq_t *cur_irq = dev_priv->via_irqs; 250 + drm_via_irq_t *cur_irq; 252 251 int i; 253 252 254 253 DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv); 255 254 if (dev_priv) { 255 + cur_irq = dev_priv->via_irqs; 256 256 257 257 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; 258 258 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;