#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>

#include "private_data.h"
#include "harddoom.h"
#include "surface.h"
#include "util.h"
#include "harddoomdev.h"

void free_texture(struct texture_data *texture_data);

int texture_release(struct inode *inode, struct file *filp)
{
	struct texture_data *texture_data;

	texture_data = filp->private_data;
	free_texture(texture_data);
	kfree(texture_data);
	return 0;
}

struct file_operations texture_fops = {
	.owner = THIS_MODULE,
	.release = texture_release
};

int verify_texture_params(struct doomdev_ioctl_create_texture *params)
{
	if (params->size > 4 * 1024 * 1024) {
		return -EOVERFLOW;
	}
	if (params->height > 1023) {
		return -EOVERFLOW;
	}
	return 0;
}

int alloc_texture(struct doomdev_ioctl_create_texture *params,
		  struct texture_data *texture_data)
{
	int err;
	int i;
	int not_written;
	int pages_needed;

	texture_data->size = params->size;
	texture_data->height = params->height;

	/* Round the texture size up to a whole number of device pages. */
	pages_needed = params->size / HARDDOOM_PAGE_SIZE;
	if (params->size % HARDDOOM_PAGE_SIZE != 0) {
		pages_needed += 1;
	}
	texture_data->pages = pages_needed;

	texture_data->texture_cpu = dma_alloc_coherent(
		texture_data->doom_data->pci_device, params->size,
		&texture_data->texture_dev, GFP_KERNEL);
	ORFAIL_NULL(texture_data->texture_cpu, -ENOMEM, error_texture);

	/* One 32-bit page table entry (4 bytes) per page. */
	texture_data->page_table_cpu = dma_alloc_coherent(
		texture_data->doom_data->pci_device, pages_needed * 4,
		&texture_data->page_table_dev, GFP_KERNEL);
	ORFAIL_NULL(texture_data->page_table_cpu, -ENOMEM, error_pt);

	for (i = 0; i < pages_needed; i++) {
		texture_data->page_table_cpu[i] =
			(HARDDOOM_PTE_PHYS_MASK &
			 (texture_data->texture_dev + HARDDOOM_PAGE_SIZE * i)) |
			HARDDOOM_PTE_VALID;
	}

	not_written = copy_from_user(texture_data->texture_cpu,
				     (void __user *) params->data_ptr,
				     params->size);
	if (not_written) {
		err = -EFAULT;
		goto error_copy;
	}
	return 0;

error_copy:
	dma_free_coherent(texture_data->doom_data->pci_device,
			  texture_data->pages * 4,
			  texture_data->page_table_cpu,
			  texture_data->page_table_dev);
error_pt:
	dma_free_coherent(texture_data->doom_data->pci_device,
			  texture_data->size, texture_data->texture_cpu,
			  texture_data->texture_dev);
error_texture:
	return err;
}

void free_texture(struct texture_data *texture_data)
{
	dma_free_coherent(texture_data->doom_data->pci_device,
			  texture_data->pages * 4,
			  texture_data->page_table_cpu,
			  texture_data->page_table_dev);
	dma_free_coherent(texture_data->doom_data->pci_device,
			  texture_data->size, texture_data->texture_cpu,
			  texture_data->texture_dev);
}

int new_texture(struct file *filp, struct doomdev_ioctl_create_texture *params)
{
	int err;
	struct texture_data *texture_data;
	int fd;
	struct doom_data *doom_data;

	err = verify_texture_params(params);
	if (err < 0) {
		return err;
	}

	texture_data = kmalloc(sizeof(*texture_data), GFP_KERNEL);
	ORFAIL_NULL(texture_data, -ENOMEM, error_data);

	doom_data = container_of(filp->f_inode->i_cdev, struct doom_data, cdev);
	texture_data->doom_data = doom_data;

	ORFAIL(alloc_texture(params, texture_data), error_texture);

	/* Expose the texture to user space as an anonymous-inode fd. */
	fd = anon_inode_getfd("doom_texture", &texture_fops, texture_data, 0);
	ORFAIL(fd, error_inode);
	return fd;

error_inode:
	free_texture(texture_data);
error_texture:
	kfree(texture_data);
error_data:
	return err;
}

void free_flat(struct flat_data *flat_data);

int flat_release(struct inode *inode, struct file *filp)
{
	struct flat_data *flat_data;

	flat_data = filp->private_data;
	free_flat(flat_data);
	kfree(flat_data);
	return 0;
}

struct file_operations flat_fops = {
	.owner = THIS_MODULE,
	.release = flat_release
};
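/*
 * Illustration only (not driver code): the page-table layout built by
 * alloc_texture() above and alloc_surface_buffer() below. Assuming
 * HARDDOOM_PAGE_SIZE is 4096, a 100000-byte texture needs
 * DIV_ROUND_UP(100000, 4096) = 25 pages, and therefore 25 page-table
 * entries of 4 bytes each. Because dma_alloc_coherent() returns a single
 * physically contiguous buffer, consecutive entries simply advance by one
 * page (buffer_dev is a stand-in name for the dma_addr_t returned by the
 * allocation):
 *
 *     pte[i] = (HARDDOOM_PTE_PHYS_MASK & (buffer_dev + i * HARDDOOM_PAGE_SIZE))
 *              | HARDDOOM_PTE_VALID;
 */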
int alloc_flat(struct doomdev_ioctl_create_flat *params,
	       struct flat_data *flat_data)
{
	int err;
	int not_written;

	/* A flat has a fixed size, so a single coherent buffer is enough. */
	flat_data->flat_cpu = dma_alloc_coherent(
		flat_data->doom_data->pci_device, HARDDOOM_FLAT_SIZE,
		&flat_data->flat_dev, GFP_KERNEL);
	ORFAIL_NULL(flat_data->flat_cpu, -ENOMEM, error_flat);

	not_written = copy_from_user(flat_data->flat_cpu,
				     (void __user *) params->data_ptr,
				     HARDDOOM_FLAT_SIZE);
	if (not_written) {
		err = -EFAULT;
		goto error_copy;
	}
	return 0;

error_copy:
	dma_free_coherent(flat_data->doom_data->pci_device, HARDDOOM_FLAT_SIZE,
			  flat_data->flat_cpu, flat_data->flat_dev);
error_flat:
	return err;
}

void free_flat(struct flat_data *flat_data)
{
	dma_free_coherent(flat_data->doom_data->pci_device, HARDDOOM_FLAT_SIZE,
			  flat_data->flat_cpu, flat_data->flat_dev);
}

int new_flat(struct file *filp, struct doomdev_ioctl_create_flat *params)
{
	int err;
	struct flat_data *flat_data;
	int fd;
	struct doom_data *doom_data;

	flat_data = kmalloc(sizeof(*flat_data), GFP_KERNEL);
	ORFAIL_NULL(flat_data, -ENOMEM, error_data);

	doom_data = container_of(filp->f_inode->i_cdev, struct doom_data, cdev);
	flat_data->doom_data = doom_data;

	ORFAIL(alloc_flat(params, flat_data), error_flat);

	fd = anon_inode_getfd("doom_flat", &flat_fops, flat_data, 0);
	ORFAIL(fd, error_inode);
	return fd;

error_inode:
	free_flat(flat_data);
error_flat:
	kfree(flat_data);
error_data:
	return err;
}

void free_colors(struct colors_data *colors_data);

int colors_release(struct inode *inode, struct file *filp)
{
	struct colors_data *colors_data;

	colors_data = filp->private_data;
	free_colors(colors_data);
	kfree(colors_data);
	return 0;
}

struct file_operations colors_fops = {
	.owner = THIS_MODULE,
	.release = colors_release
};

int alloc_colors(struct doomdev_ioctl_create_colormaps *params,
		 struct colors_data *colors_data)
{
	int err;
	int not_written;

	colors_data->number = params->num;

	/* All requested colormaps live in one contiguous coherent buffer. */
	colors_data->colors_cpu = dma_alloc_coherent(
		colors_data->doom_data->pci_device,
		HARDDOOM_COLORMAP_SIZE * params->num,
		&colors_data->colors_dev, GFP_KERNEL);
	ORFAIL_NULL(colors_data->colors_cpu, -ENOMEM, error_colors);

	not_written = copy_from_user(colors_data->colors_cpu,
				     (void __user *) params->data_ptr,
				     HARDDOOM_COLORMAP_SIZE * params->num);
	if (not_written) {
		err = -EFAULT;
		goto error_copy;
	}
	return 0;

error_copy:
	dma_free_coherent(colors_data->doom_data->pci_device,
			  HARDDOOM_COLORMAP_SIZE * params->num,
			  colors_data->colors_cpu, colors_data->colors_dev);
error_colors:
	return err;
}

void free_colors(struct colors_data *colors_data)
{
	dma_free_coherent(colors_data->doom_data->pci_device,
			  HARDDOOM_COLORMAP_SIZE * colors_data->number,
			  colors_data->colors_cpu, colors_data->colors_dev);
}

int new_colors(struct file *filp,
	       struct doomdev_ioctl_create_colormaps *params)
{
	int err;
	struct colors_data *colors_data;
	int fd;
	struct doom_data *doom_data;

	colors_data = kmalloc(sizeof(*colors_data), GFP_KERNEL);
	ORFAIL_NULL(colors_data, -ENOMEM, error_data);

	doom_data = container_of(filp->f_inode->i_cdev, struct doom_data, cdev);
	colors_data->doom_data = doom_data;

	ORFAIL(alloc_colors(params, colors_data), error_colors);

	fd = anon_inode_getfd("doom_colors", &colors_fops, colors_data, 0);
	ORFAIL(fd, error_inode);
	return fd;

error_inode:
	free_colors(colors_data);
error_colors:
	kfree(colors_data);
error_data:
	return err;
}
long draw_lines(struct file *filp, unsigned long arg)
{
	struct surface_data *surface_data;
	struct doomdev_surf_ioctl_draw_lines param;
	struct doomdev_line *lines;
	struct doomdev_line line;
	int i;
	int err;
	int not_copied;

	surface_data = filp->private_data;
	not_copied = copy_from_user(&param, (void __user *) arg,
				    sizeof(param));
	if (not_copied) {
		err = -EFAULT;
		goto error_param;
	}
	lines = (struct doomdev_line *) param.lines_ptr;

	mutex_lock(&surface_data->doom_data->cmd_mutex);
	for (i = 0; i < param.lines_num; i++) {
		not_copied = copy_from_user(&line, (void __user *) &lines[i],
					    sizeof(line));
		if (not_copied) {
			goto error_copy;
		}
		draw_line(surface_data, &line);
	}
error_copy:
	/* Report how many lines were submitted to the device. */
	err = i;
	mutex_unlock(&surface_data->doom_data->cmd_mutex);
error_param:
	return err;
}

long fill_rects(struct file *filp, unsigned long arg)
{
	struct surface_data *surface_data;
	struct doomdev_surf_ioctl_fill_rects param;
	struct doomdev_fill_rect *rects;
	struct doomdev_fill_rect rect;
	int i;
	int not_copied;
	int err;

	surface_data = filp->private_data;
	not_copied = copy_from_user(&param, (void __user *) arg,
				    sizeof(param));
	if (not_copied) {
		err = -EFAULT;
		goto error_param;
	}
	rects = (struct doomdev_fill_rect *) param.rects_ptr;

	mutex_lock(&surface_data->doom_data->cmd_mutex);
	for (i = 0; i < param.rects_num; i++) {
		not_copied = copy_from_user(&rect, (void __user *) &rects[i],
					    sizeof(rect));
		if (not_copied) {
			goto error_copy;
		}
		fill_rect(surface_data, &rect);
	}
error_copy:
	/* Report how many rectangles were submitted to the device. */
	err = i;
	mutex_unlock(&surface_data->doom_data->cmd_mutex);
error_param:
	return err;
}

struct file_operations surface_fops;

long copy_rects(struct file *filp, unsigned long arg)
{
	struct surface_data *dst_data;
	struct surface_data *src_data;
	struct doomdev_surf_ioctl_copy_rects param;
	struct doomdev_copy_rect *rects;
	struct doomdev_copy_rect rect;
	struct fd src_fds;
	int i;
	int err;
	int not_copied;

	dst_data = filp->private_data;
	not_copied = copy_from_user(&param, (void __user *) arg,
				    sizeof(param));
	if (not_copied) {
		err = -EFAULT;
		goto error_param;
	}
	rects = (struct doomdev_copy_rect *) param.rects_ptr;

	/* The source fd must be one of our surfaces on the same device. */
	src_fds = fdget(param.surf_src_fd);
	if (!src_fds.file || src_fds.file->f_op != &surface_fops) {
		err = -EINVAL;
		goto error_fdget;
	}
	src_data = src_fds.file->private_data;
	if (dst_data->doom_data != src_data->doom_data) {
		err = -EINVAL;
		goto error_fdget;
	}

	mutex_lock(&dst_data->doom_data->cmd_mutex);
	for (i = 0; i < param.rects_num; i++) {
		not_copied = copy_from_user(&rect, (void __user *) &rects[i],
					    sizeof(rect));
		if (not_copied) {
			goto error_copy;
		}
		copy_rect(dst_data, src_data, &rect);
	}
error_copy:
	/* Report how many rectangles were submitted to the device. */
	err = i;
	mutex_unlock(&dst_data->doom_data->cmd_mutex);
error_fdget:
	fdput(src_fds);
error_param:
	return err;
}
long draw_columns(struct file *filp, unsigned long arg)
{
	struct doomdev_surf_ioctl_draw_columns param;
	struct surface_data *surface_data;
	struct texture_data *texture_data;
	struct colors_data *colors_data = NULL;
	struct colors_data *trans_data = NULL;
	struct doomdev_column *columns;
	struct doomdev_column column;
	struct fd texture_fds;
	struct fd colors_fds;
	struct fd trans_fds;
	bool got_colors = false;
	bool got_trans = false;
	bool got_texture = false;
	int not_copied;
	int i;
	int err;

	surface_data = filp->private_data;
	not_copied = copy_from_user(&param, (void __user *) arg,
				    sizeof(param));
	if (not_copied) {
		err = -EFAULT;
		goto error_param;
	}

	if (param.draw_flags & DOOMDEV_DRAW_FLAGS_FUZZ ||
	    param.draw_flags & DOOMDEV_DRAW_FLAGS_COLORMAP) {
		/* The fuzz effect overrides every other draw flag. */
		if (param.draw_flags & DOOMDEV_DRAW_FLAGS_FUZZ) {
			param.draw_flags = DOOMDEV_DRAW_FLAGS_FUZZ;
		}
		colors_fds = fdget(param.colormaps_fd);
		got_colors = true;
		if (!colors_fds.file ||
		    colors_fds.file->f_op != &colors_fops) {
			err = -EINVAL;
			goto error_fdget;
		}
		colors_data = colors_fds.file->private_data;
		if (surface_data->doom_data != colors_data->doom_data) {
			err = -EINVAL;
			goto error_fdget;
		}
	}

	if (param.draw_flags & DOOMDEV_DRAW_FLAGS_TRANSLATE) {
		trans_fds = fdget(param.translations_fd);
		got_trans = true;
		if (!trans_fds.file || trans_fds.file->f_op != &colors_fops) {
			err = -EINVAL;
			goto error_fdget;
		}
		trans_data = trans_fds.file->private_data;
		if (surface_data->doom_data != trans_data->doom_data) {
			err = -EINVAL;
			goto error_fdget;
		}
	}

	texture_fds = fdget(param.texture_fd);
	got_texture = true;
	if (!texture_fds.file || texture_fds.file->f_op != &texture_fops) {
		err = -EINVAL;
		goto error_fdget;
	}
	texture_data = texture_fds.file->private_data;
	if (surface_data->doom_data != texture_data->doom_data) {
		err = -EINVAL;
		goto error_fdget;
	}

	columns = (struct doomdev_column *) param.columns_ptr;

	mutex_lock(&surface_data->doom_data->cmd_mutex);
	for (i = 0; i < param.columns_num; i++) {
		not_copied = copy_from_user(&column,
					    (void __user *) &columns[i],
					    sizeof(column));
		if (not_copied) {
			goto error_copy;
		}
		draw_column(surface_data, texture_data, &column, colors_data,
			    trans_data, param.draw_flags,
			    param.translation_idx);
	}
error_copy:
	/* Report how many columns were submitted to the device. */
	err = i;
	mutex_unlock(&surface_data->doom_data->cmd_mutex);
error_fdget:
	if (got_texture) {
		fdput(texture_fds);
	}
	if (got_colors) {
		fdput(colors_fds);
	}
	if (got_trans) {
		fdput(trans_fds);
	}
error_param:
	return err;
}

long draw_spans(struct file *filp, unsigned long arg)
{
	struct doomdev_surf_ioctl_draw_spans param;
	struct surface_data *surface_data;
	struct flat_data *flat_data;
	struct doomdev_span *spans;
	struct doomdev_span span;
	struct fd flat_fds;
	int i;
	int err;
	int not_copied;

	surface_data = filp->private_data;
	not_copied = copy_from_user(&param, (void __user *) arg,
				    sizeof(param));
	if (not_copied) {
		err = -EFAULT;
		goto error_param;
	}

	flat_fds = fdget(param.flat_fd);
	if (!flat_fds.file || flat_fds.file->f_op != &flat_fops) {
		err = -EINVAL;
		goto error_fdget;
	}
	flat_data = flat_fds.file->private_data;
	if (surface_data->doom_data != flat_data->doom_data) {
		err = -EINVAL;
		goto error_fdget;
	}

	spans = (struct doomdev_span *) param.spans_ptr;

	mutex_lock(&surface_data->doom_data->cmd_mutex);
	for (i = 0; i < param.spans_num; i++) {
		not_copied = copy_from_user(&span, (void __user *) &spans[i],
					    sizeof(span));
		if (not_copied) {
			goto error_copy;
		}
		draw_span(surface_data, flat_data, &span);
	}
error_copy:
	/* Report how many spans were submitted to the device. */
	err = i;
	mutex_unlock(&surface_data->doom_data->cmd_mutex);
error_fdget:
	fdput(flat_fds);
error_param:
	return err;
}

long do_draw_background(struct file *filp, unsigned long arg)
{
	struct doomdev_surf_ioctl_draw_background param;
	struct surface_data *surface_data;
	struct flat_data *flat_data;
	struct fd flat_fds;
	int err = 0;
	int not_written;

	surface_data = filp->private_data;
	not_written = copy_from_user(&param, (void __user *) arg,
				     sizeof(param));
	if (not_written) {
		err = -EFAULT;
		goto error_param;
	}

	flat_fds = fdget(param.flat_fd);
	if (!flat_fds.file || flat_fds.file->f_op != &flat_fops) {
		err = -EINVAL;
		goto error_fdget;
	}
	flat_data = flat_fds.file->private_data;
	if (surface_data->doom_data != flat_data->doom_data) {
		err = -EINVAL;
		goto error_fdget;
	}

	mutex_lock(&surface_data->doom_data->cmd_mutex);
	draw_background(surface_data, flat_data);
	mutex_unlock(&surface_data->doom_data->cmd_mutex);

error_fdget:
	fdput(flat_fds);
error_param:
	return err;
}
long surface_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DOOMDEV_SURF_IOCTL_DRAW_LINES:
		return draw_lines(filp, arg);
	case DOOMDEV_SURF_IOCTL_FILL_RECTS:
		return fill_rects(filp, arg);
	case DOOMDEV_SURF_IOCTL_COPY_RECTS:
		return copy_rects(filp, arg);
	case DOOMDEV_SURF_IOCTL_DRAW_COLUMNS:
		return draw_columns(filp, arg);
	case DOOMDEV_SURF_IOCTL_DRAW_SPANS:
		return draw_spans(filp, arg);
	case DOOMDEV_SURF_IOCTL_DRAW_BACKGROUND:
		return do_draw_background(filp, arg);
	default:
		return -ENOTTY;
	}
}

ssize_t surface_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *offset)
{
	struct surface_data *surface_data;
	unsigned long not_written;

	surface_data = (struct surface_data *) filp->private_data;
	if (*offset >= surface_data->surface_size || *offset < 0) {
		return 0;
	}
	if (*offset + count > surface_data->surface_size) {
		count = surface_data->surface_size - *offset;
	}

	/* Wait for the device to finish pending commands before reading. */
	mutex_lock(&surface_data->doom_data->ping_mutex);
	ping_sync(surface_data->doom_data);
	down(&surface_data->doom_data->pong_sem);

	not_written = copy_to_user(buf, surface_data->surface_cpu + (*offset),
				   count);
	*offset += count - not_written;
	mutex_unlock(&surface_data->doom_data->ping_mutex);
	return count - not_written;
}

loff_t surface_llseek(struct file *filp, loff_t offset, int origin)
{
	struct surface_data *surface_data;
	loff_t new_pos;

	surface_data = filp->private_data;
	switch (origin) {
	case SEEK_SET:
		new_pos = offset;
		break;
	case SEEK_CUR:
		new_pos = filp->f_pos + offset;
		break;
	case SEEK_END:
		new_pos = surface_data->surface_size + offset;
		break;
	default:
		return -EINVAL;
	}
	if (new_pos < 0) {
		return -EINVAL;
	}
	filp->f_pos = new_pos;
	return new_pos;
}

void free_surface_buffer(struct surface_data *surface_data);

int surface_release(struct inode *inode, struct file *filp)
{
	struct surface_data *surface_data;

	surface_data = filp->private_data;

	/* Make sure the device is idle before freeing its buffers. */
	mutex_lock(&surface_data->doom_data->ping_mutex);
	ping_sync(surface_data->doom_data);
	down(&surface_data->doom_data->pong_sem);
	mutex_unlock(&surface_data->doom_data->ping_mutex);

	free_surface_buffer(surface_data);
	kfree(surface_data);
	return 0;
}

struct file_operations surface_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = surface_ioctl,
	.compat_ioctl = surface_ioctl,
	.llseek = surface_llseek,
	.read = surface_read,
	.release = surface_release
};

int verify_params(struct doomdev_ioctl_create_surface *params)
{
	if (params->width < 64) {
		return -EINVAL;
	}
	if (params->width > 2048) {
		return -EOVERFLOW;
	}
	if (params->width % 64 != 0) {
		return -EINVAL;
	}
	if (params->height < 1) {
		return -EINVAL;
	}
	if (params->height > 2048) {
		return -EOVERFLOW;
	}
	return 0;
}

int alloc_surface_buffer(struct doomdev_ioctl_create_surface *params,
			 struct surface_data *surface_data)
{
	int bytes_needed;
	int pages_needed;
	int i;
	int err;

	bytes_needed = params->width * params->height;
	surface_data->surface_size = bytes_needed;
	surface_data->width = params->width;
	surface_data->height = params->height;

	/* Round the framebuffer up to a whole number of device pages. */
	pages_needed = bytes_needed / HARDDOOM_PAGE_SIZE;
	if (bytes_needed % HARDDOOM_PAGE_SIZE != 0) {
		pages_needed += 1;
	}
	if (pages_needed > 1024) {
		return -EOVERFLOW;
	}
	surface_data->pages = pages_needed;

	surface_data->surface_cpu = dma_alloc_coherent(
		surface_data->doom_data->pci_device, bytes_needed,
		&surface_data->surface_dev, GFP_KERNEL);
	ORFAIL_NULL(surface_data->surface_cpu, -ENOMEM, error_surface);

	/* One 32-bit page table entry (4 bytes) per page. */
	surface_data->page_table_cpu = dma_alloc_coherent(
		surface_data->doom_data->pci_device, pages_needed * 4,
		&surface_data->page_table_dev, GFP_KERNEL);
	ORFAIL_NULL(surface_data->page_table_cpu, -ENOMEM, error_pt);

	for (i = 0; i < pages_needed; i++) {
		surface_data->page_table_cpu[i] =
			(HARDDOOM_PTE_PHYS_MASK &
			 (surface_data->surface_dev + HARDDOOM_PAGE_SIZE * i)) |
			HARDDOOM_PTE_VALID;
	}
	return 0;

error_pt:
	dma_free_coherent(surface_data->doom_data->pci_device,
			  surface_data->surface_size,
			  surface_data->surface_cpu,
			  surface_data->surface_dev);
error_surface:
	return err;
}
void free_surface_buffer(struct surface_data *surface_data)
{
	dma_free_coherent(surface_data->doom_data->pci_device,
			  surface_data->surface_size,
			  surface_data->surface_cpu,
			  surface_data->surface_dev);
	dma_free_coherent(surface_data->doom_data->pci_device,
			  surface_data->pages * 4,
			  surface_data->page_table_cpu,
			  surface_data->page_table_dev);
}

int new_surface(struct file *filp, struct doomdev_ioctl_create_surface *params)
{
	int err;
	int fd;
	struct fd fd_s;
	struct surface_data *surface_data;
	struct doom_data *doom_data;

	err = verify_params(params);
	if (err < 0) {
		return err;
	}

	surface_data = kmalloc(sizeof(*surface_data), GFP_KERNEL);
	ORFAIL_NULL(surface_data, -ENOMEM, error_data);

	doom_data = container_of(filp->f_inode->i_cdev, struct doom_data, cdev);
	surface_data->doom_data = doom_data;

	ORFAIL(alloc_surface_buffer(params, surface_data), error_buffer);

	/* Expose the surface to user space as an anonymous-inode fd. */
	fd = anon_inode_getfd("doom_surface", &surface_fops, surface_data, 0);
	if (fd < 0) {
		err = fd;
		goto error_fdget;
	}

	/*
	 * Anonymous inodes are not seekable by default; the surface needs
	 * llseek and pread/pwrite so rendered frames can be read back.
	 */
	fd_s = fdget(fd);
	if (!fd_s.file || fd_s.file->f_op != &surface_fops) {
		err = -ENOENT;
		goto error_fdget;
	}
	fd_s.file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
	fdput(fd_s);
	return fd;

error_fdget:
	free_surface_buffer(surface_data);
error_buffer:
	kfree(surface_data);
error_data:
	return err;
}
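/*
 * Illustrative user-space sketch (not part of the driver) of how the objects
 * above are meant to be used. The device node path and the
 * DOOMDEV_IOCTL_CREATE_SURFACE request are assumptions about the rest of the
 * doomdev API, which is handled outside this file; the surface ioctls and the
 * read-back path are the ones implemented above.
 *
 *     int doom = open("/dev/doom0", O_RDWR);
 *
 *     struct doomdev_ioctl_create_surface create = {
 *             .width = 640,   // must be a multiple of 64, in 64..2048
 *             .height = 400,  // in 1..2048
 *     };
 *     int surf = ioctl(doom, DOOMDEV_IOCTL_CREATE_SURFACE, &create);
 *
 *     struct doomdev_fill_rect rect;
 *     // fill in the rectangle fields as defined in doomdev.h
 *     struct doomdev_surf_ioctl_fill_rects req = {
 *             .rects_ptr = (uint64_t) (uintptr_t) &rect,
 *             .rects_num = 1,
 *     };
 *     ioctl(surf, DOOMDEV_SURF_IOCTL_FILL_RECTS, &req);  // returns rects drawn
 *
 *     char pixels[640 * 400];
 *     pread(surf, pixels, sizeof(pixels), 0);  // waits for the device, then
 *                                              // copies back the framebuffer
 */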