From: <ant...@ko...> - 2016-09-30 06:53:52
|
From: Anton Ivanov <ai...@ko...> UBD at present is extremely slow because it handles only one request at a time in the IO thread and IRQ handler. The single request at a time is replaced by handling multiple requests as well as necessary workarounds for short reads/writes. Resulting performance improvement in disk IO - 30% Signed-off-by: Anton Ivanov <ai...@ko...> --- arch/um/drivers/ubd.h | 5 ++ arch/um/drivers/ubd_kern.c | 161 ++++++++++++++++++++++++++++++++++++++------- arch/um/drivers/ubd_user.c | 19 +++++- 3 files changed, 159 insertions(+), 26 deletions(-) diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h index 3b48cd2..cc1cc85 100644 --- a/arch/um/drivers/ubd.h +++ b/arch/um/drivers/ubd.h @@ -11,5 +11,10 @@ extern int start_io_thread(unsigned long sp, int *fds_out); extern int io_thread(void *arg); extern int kernel_fd; +extern int ubd_read_poll(int timeout); +extern int ubd_write_poll(int timeout); + +#define UBD_REQ_BUFFER_SIZE 64 + #endif diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index f354027..3f610df 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -1,4 +1,5 @@ /* + * Copyright (C) 2015-2016 Anton Ivanov (ai...@br...) * Copyright (C) 2000 Jeff Dike (jd...@ka...) * Licensed under the GPL */ @@ -58,6 +59,17 @@ struct io_thread_req { int error; }; + +static struct io_thread_req* (*irq_req_buffer) []; +static struct io_thread_req * irq_remainder; +static int irq_remainder_size; + +static struct io_thread_req* (*io_req_buffer) []; +static struct io_thread_req * io_remainder; +static int io_remainder_size; + + + static inline int ubd_test_bit(__u64 bit, unsigned char *data) { __u64 n; @@ -442,29 +454,88 @@ static void do_ubd_request(struct request_queue * q); static int thread_fd = -1; static LIST_HEAD(restart); -/* XXX - move this inside ubd_intr. 
*/ +/* Function to read several request pointers at a time +* handling fractional reads if (and as) needed +*/ + +static int bulk_req_safe_read( + int fd, + struct io_thread_req * (*request_buffer) [], + struct io_thread_req ** remainder, + int * remainder_size, + int max_recs + ) +{ + int n = 0; + int res = 0; + + if (*remainder_size > 0) { + memmove( + (char *) request_buffer, + (char *) remainder, * remainder_size + ); + n = * remainder_size; + } + + res = os_read_file( + fd, + ((char *) request_buffer) + * remainder_size, + sizeof(struct io_thread_req *) * max_recs - *remainder_size + ); + if (res > 0) { + n += res; + if ((n % sizeof(struct io_thread_req *)) > 0) { + /* + * Read somehow returned not a multiple of dword + * theoretically possible, but never observed in the + * wild, so read routine must be able to handle it + */ + *remainder_size = n % sizeof(struct io_thread_req *); + memmove( + remainder, + ((char *) request_buffer) + n/sizeof(struct io_thread_req *), + *remainder_size + ); + n = n - *remainder_size; + } + } else { + n = res; + } + return n; +} + /* Called without dev->lock held, and only in interrupt context. 
*/ static void ubd_handler(void) { - struct io_thread_req *req; struct ubd *ubd; struct list_head *list, *next_ele; unsigned long flags; int n; + int count; while(1){ - n = os_read_file(thread_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(req)){ + n = bulk_req_safe_read( + thread_fd, + irq_req_buffer, + &irq_remainder, + &irq_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if(n < 0){ if(n == -EAGAIN) break; printk(KERN_ERR "spurious interrupt in ubd_handler, " "err = %d\n", -n); return; } - - blk_end_request(req->req, 0, req->length); - kfree(req); + for (count=0;count < n /sizeof(struct io_thread_req *);count++) { + blk_end_request( + (* irq_req_buffer)[count]->req, + 0, + (* irq_req_buffer)[count]->length + ); + kfree((* irq_req_buffer)[count]); + } } reactivate_fd(thread_fd, UBD_IRQ); @@ -1064,6 +1135,28 @@ static int __init ubd_init(void) if (register_blkdev(fake_major, "ubd")) return -1; } + + irq_req_buffer = kmalloc( + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, + GFP_KERNEL + ); + irq_remainder = 0; + + if(irq_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } + io_req_buffer = kmalloc( + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, + GFP_KERNEL + ); + + io_remainder = 0; + + if(io_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } platform_driver_register(&ubd_driver); mutex_lock(&ubd_lock); for (i = 0; i < MAX_DEV; i++){ @@ -1458,31 +1551,49 @@ static int io_count = 0; int io_thread(void *arg) { - struct io_thread_req *req; - int n; + int n, count, written, res; os_fix_helper_signals(); while(1){ - n = os_read_file(kernel_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(struct io_thread_req *)){ - if(n < 0) + n = bulk_req_safe_read( + kernel_fd, + io_req_buffer, + &io_remainder, + &io_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if(n < 0){ + if(n == -EAGAIN) { + ubd_read_poll(-1); + continue; + } else { 
printk("io_thread - read failed, fd = %d, " "err = %d\n", kernel_fd, -n); - else { - printk("io_thread - short read, fd = %d, " - "length = %d\n", kernel_fd, n); } - continue; + } + + for (count=0;count < n /sizeof(struct io_thread_req *);count++) { + io_count++; + do_io((* io_req_buffer)[count]); } - io_count++; - do_io(req); - n = os_write_file(kernel_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(struct io_thread_req *)) - printk("io_thread - write failed, fd = %d, err = %d\n", - kernel_fd, -n); + + written = 0; + + do { + res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n); + if (res > 0) { + written += res; + } else { + if (res != -EAGAIN) { + printk("io_thread - read failed, fd = %d, " + "err = %d\n", kernel_fd, -n); + } + } + if (written < n) { + ubd_write_poll(-1); + } + } while (written < n); } return 0; diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c index e376f9b..d3240b5 100644 --- a/arch/um/drivers/ubd_user.c +++ b/arch/um/drivers/ubd_user.c @@ -1,4 +1,5 @@ -/* +/* + * Copyright (C) 2016 Anton Ivanov (ai...@br...) * Copyright (C) 2000, 2001, 2002 Jeff Dike (jd...@ka...) * Copyright (C) 2001 Ridgerun,Inc (gl...@ri...) 
* Licensed under the GPL @@ -20,6 +21,9 @@ #include "ubd.h" #include <os.h> +#include <poll.h> + +struct pollfd kernel_pollfd; int start_io_thread(unsigned long sp, int *fd_out) { @@ -32,9 +36,12 @@ int start_io_thread(unsigned long sp, int *fd_out) } kernel_fd = fds[0]; + kernel_pollfd.fd = kernel_fd; + kernel_pollfd.events = POLLIN; *fd_out = fds[1]; err = os_set_fd_block(*fd_out, 0); + err = os_set_fd_block(kernel_fd, 0); if (err) { printk("start_io_thread - failed to set nonblocking I/O.\n"); goto out_close; @@ -57,3 +64,13 @@ int start_io_thread(unsigned long sp, int *fd_out) out: return err; } + +int ubd_read_poll(int timeout) { + kernel_pollfd.events = POLLIN; + return poll(&kernel_pollfd, 1, timeout); +} +int ubd_write_poll(int timeout) { + kernel_pollfd.events = POLLOUT; + return poll(&kernel_pollfd, 1, timeout); +} + -- 2.1.4 |
From: <ant...@ko...> - 2016-11-09 08:37:16
|
From: Anton Ivanov <ai...@br...> UBD at present is extremely slow because it handles only one request at a time in the IO thread and IRQ handler. The single request at a time is replaced by handling multiple requests as well as necessary workarounds for short reads/writes. Resulting performance improvement in disk IO - 30% Signed-off-by: Anton Ivanov <ai...@br...> --- arch/um/drivers/ubd.h | 5 ++ arch/um/drivers/ubd_kern.c | 167 ++++++++++++++++++++++++++++++++++++++------- arch/um/drivers/ubd_user.c | 21 +++++- 3 files changed, 166 insertions(+), 27 deletions(-) diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h index 3b48cd2..cc1cc85 100644 --- a/arch/um/drivers/ubd.h +++ b/arch/um/drivers/ubd.h @@ -11,5 +11,10 @@ extern int start_io_thread(unsigned long sp, int *fds_out); extern int io_thread(void *arg); extern int kernel_fd; +extern int ubd_read_poll(int timeout); +extern int ubd_write_poll(int timeout); + +#define UBD_REQ_BUFFER_SIZE 64 + #endif diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index f354027..7007ee3 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -1,4 +1,5 @@ /* + * Copyright (C) 2015-2016 Anton Ivanov (ai...@br...) * Copyright (C) 2000 Jeff Dike (jd...@ka...) * Licensed under the GPL */ @@ -58,6 +59,17 @@ struct io_thread_req { int error; }; + +static struct io_thread_req * (*irq_req_buffer)[]; +static struct io_thread_req *irq_remainder; +static int irq_remainder_size; + +static struct io_thread_req * (*io_req_buffer)[]; +static struct io_thread_req *io_remainder; +static int io_remainder_size; + + + static inline int ubd_test_bit(__u64 bit, unsigned char *data) { __u64 n; @@ -442,29 +454,90 @@ static void do_ubd_request(struct request_queue * q); static int thread_fd = -1; static LIST_HEAD(restart); -/* XXX - move this inside ubd_intr. 
*/ +/* Function to read several request pointers at a time +* handling fractional reads if (and as) needed +*/ + +static int bulk_req_safe_read( + int fd, + struct io_thread_req * (*request_buffer)[], + struct io_thread_req **remainder, + int *remainder_size, + int max_recs + ) +{ + int n = 0; + int res = 0; + + if (*remainder_size > 0) { + memmove( + (char *) request_buffer, + (char *) remainder, *remainder_size + ); + n = *remainder_size; + } + + res = os_read_file( + fd, + ((char *) request_buffer) + *remainder_size, + sizeof(struct io_thread_req *)*max_recs + - *remainder_size + ); + if (res > 0) { + n += res; + if ((n % sizeof(struct io_thread_req *)) > 0) { + /* + * Read somehow returned not a multiple of dword + * theoretically possible, but never observed in the + * wild, so read routine must be able to handle it + */ + *remainder_size = n % sizeof(struct io_thread_req *); + memmove( + remainder, + ((char *) request_buffer) + + n/sizeof(struct io_thread_req *), + *remainder_size + ); + n = n - *remainder_size; + } + } else { + n = res; + } + return n; +} + /* Called without dev->lock held, and only in interrupt context. 
*/ static void ubd_handler(void) { - struct io_thread_req *req; struct ubd *ubd; struct list_head *list, *next_ele; unsigned long flags; int n; + int count; while(1){ - n = os_read_file(thread_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(req)){ + n = bulk_req_safe_read( + thread_fd, + irq_req_buffer, + &irq_remainder, + &irq_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if (n < 0) { if(n == -EAGAIN) break; printk(KERN_ERR "spurious interrupt in ubd_handler, " "err = %d\n", -n); return; } - - blk_end_request(req->req, 0, req->length); - kfree(req); + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + blk_end_request( + (*irq_req_buffer)[count]->req, + 0, + (*irq_req_buffer)[count]->length + ); + kfree((*irq_req_buffer)[count]); + } } reactivate_fd(thread_fd, UBD_IRQ); @@ -1064,6 +1137,28 @@ static int __init ubd_init(void) if (register_blkdev(fake_major, "ubd")) return -1; } + + irq_req_buffer = kmalloc( + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, + GFP_KERNEL + ); + irq_remainder = 0; + + if (irq_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } + io_req_buffer = kmalloc( + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, + GFP_KERNEL + ); + + io_remainder = 0; + + if (io_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } platform_driver_register(&ubd_driver); mutex_lock(&ubd_lock); for (i = 0; i < MAX_DEV; i++){ @@ -1458,31 +1553,51 @@ static int io_count = 0; int io_thread(void *arg) { - struct io_thread_req *req; - int n; + int n, count, written, res; os_fix_helper_signals(); while(1){ - n = os_read_file(kernel_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(struct io_thread_req *)){ - if(n < 0) + n = bulk_req_safe_read( + kernel_fd, + io_req_buffer, + &io_remainder, + &io_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if (n < 0) { + if (n == -EAGAIN) { + ubd_read_poll(-1); + continue; + } else { 
printk("io_thread - read failed, fd = %d, " - "err = %d\n", kernel_fd, -n); - else { - printk("io_thread - short read, fd = %d, " - "length = %d\n", kernel_fd, n); + "err = %d," + "reminder = %d\n", + kernel_fd, -n, io_remainder_size); } - continue; } - io_count++; - do_io(req); - n = os_write_file(kernel_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(struct io_thread_req *)) - printk("io_thread - write failed, fd = %d, err = %d\n", - kernel_fd, -n); + + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + io_count++; + do_io((*io_req_buffer)[count]); + } + + written = 0; + + do { + res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n); + if (res > 0) { + written += res; + } else { + if (res != -EAGAIN) { + printk("io_thread - read failed, fd = %d, " + "err = %d\n", kernel_fd, -n); + } + } + if (written < n) { + ubd_write_poll(-1); + } + } while (written < n); } return 0; diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c index e376f9b..6f74479 100644 --- a/arch/um/drivers/ubd_user.c +++ b/arch/um/drivers/ubd_user.c @@ -1,4 +1,5 @@ -/* +/* + * Copyright (C) 2016 Anton Ivanov (ai...@br...) * Copyright (C) 2000, 2001, 2002 Jeff Dike (jd...@ka...) * Copyright (C) 2001 Ridgerun,Inc (gl...@ri...) 
* Licensed under the GPL @@ -20,6 +21,9 @@ #include "ubd.h" #include <os.h> +#include <poll.h> + +struct pollfd kernel_pollfd; int start_io_thread(unsigned long sp, int *fd_out) { @@ -32,9 +36,12 @@ int start_io_thread(unsigned long sp, int *fd_out) } kernel_fd = fds[0]; + kernel_pollfd.fd = kernel_fd; + kernel_pollfd.events = POLLIN; *fd_out = fds[1]; err = os_set_fd_block(*fd_out, 0); + err = os_set_fd_block(kernel_fd, 0); if (err) { printk("start_io_thread - failed to set nonblocking I/O.\n"); goto out_close; @@ -57,3 +64,15 @@ int start_io_thread(unsigned long sp, int *fd_out) out: return err; } + +int ubd_read_poll(int timeout) +{ + kernel_pollfd.events = POLLIN; + return poll(&kernel_pollfd, 1, timeout); +} +int ubd_write_poll(int timeout) +{ + kernel_pollfd.events = POLLOUT; + return poll(&kernel_pollfd, 1, timeout); +} + -- 2.1.4 |
From: James M. <jam...@ho...> - 2016-11-09 16:08:15
|
Hi I am not clear on the remainder/remainder_size would not a single offset parameter work? rather than copying it off and back? also max_recs does not seem to be used except to calculate max buffer size so would not using a buffer size be easier? something like bulk_req_read( int fd, struct io_thread_req *buf, size_t len, size_t *offset) +static int bulk_req_safe_read( + int fd, + struct io_thread_req * (*request_buffer)[], + struct io_thread_req **remainder, + int *remainder_size, + int max_recs + ) +{ + int n = 0; + int res = 0; + + if (*remainder_size > 0) { + memmove( + (char *) request_buffer, + (char *) remainder, *remainder_size + ); + n = *remainder_size; + } + + res = os_read_file( + fd, + ((char *) request_buffer) + *remainder_size, + sizeof(struct io_thread_req *)*max_recs + - *remainder_size then here it would be res = os_read_file(fd, buf+*offset,len-*offset) Note that if this is ever multithreaded buf and offset or request_buffer/remainder/remainder_size need to be kept separate + ); + if (res > 0) { + n += res; + if ((n % sizeof(struct io_thread_req *)) > 0) { + /* + * Read somehow returned not a multiple of dword + * theoretically possible, but never observed in the + * wild, so read routine must be able to handle it + */ maybe this should have a WARN_ON since it should never occur? + *remainder_size = n % sizeof(struct io_thread_req *); + memmove( + remainder, + ((char *) request_buffer) + + n/sizeof(struct io_thread_req *), + *remainder_size I am pretty sure the 2nd parameter is off. I think it should be: ((char *) request_buffer) +(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *) also copying it off to a shared static buffer? + ); + n = n - *remainder_size; + } + } else { + n = res; + } + return n; +} + /* Called without dev->lock held, and only in interrupt context. 
*/ static void ubd_handler(void) { - struct io_thread_req *req; struct ubd *ubd; struct list_head *list, *next_ele; unsigned long flags; int n; + int count; while(1){ - n = os_read_file(thread_fd, &req, - sizeof(struct io_thread_req *)); - if(n != sizeof(req)){ if it is being rewritten n could just be the number of complete buffers for the for loop below + n = bulk_req_safe_read( + thread_fd, + irq_req_buffer, + &irq_remainder, + &irq_remainder_size, + UBD_REQ_BUFFER_SIZE + ); + if (n < 0) { if(n == -EAGAIN) break; printk(KERN_ERR "spurious interrupt in ubd_handler, " "err = %d\n", -n); return; } - - blk_end_request(req->req, 0, req->length); - kfree(req); + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { + blk_end_request( + (*irq_req_buffer)[count]->req, + 0, + (*irq_req_buffer)[count]->length + ); + kfree((*irq_req_buffer)[count]); + } } reactivate_fd(thread_fd, UBD_IRQ); @@ -1064,6 +1137,28 @@ static int __init ubd_init(void) if (register_blkdev(fake_major, "ubd")) return -1; } + + irq_req_buffer = kmalloc( + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, + GFP_KERNEL + ); + irq_remainder = 0; + + if (irq_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } Ok, I am really not tracking this, the same buffer allocated twice? + io_req_buffer = kmalloc( + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, + GFP_KERNEL + ); + + io_remainder = 0; + + if (io_req_buffer == NULL) { + printk(KERN_ERR "Failed to initialize ubd buffering\n"); + return -1; + } platform_driver_register(&ubd_driver); mutex_lock(&ubd_lock); for (i = 0; i < MAX_DEV; i++){ Sorry for the poor email I am using a web interface and it keeps dropping chars... Jim |
From: Anton I. <ant...@ko...> - 2016-11-09 16:21:25
|
On 09/11/16 16:08, James McMechan wrote: > Hi > > I am not clear on the remainder/remainder_size would not a single offset parameter work? rather than copying it off and back? Possible. The other alternative is to copy the remainder (if it exists) to the beginning of the buffer at the end of a read and keep only the offset around instead of keeping two params around. You can no longer reuse the whole read function in both the interrupt handler and the io thread though. The reason is that you can copy the remainder to the beginning of the buffer (so you can use only offset as a single param) only after you have "consumed" all the records. > also max_recs does not seem to used except to calculate max buffer size so would not using a buffer size be easier? > something like bulk_req_read( int fd, struct io_thread_req *buf, size_t len, size_t *offset) > > +static int bulk_req_safe_read( > + int fd, > + struct io_thread_req * (*request_buffer)[], > + struct io_thread_req **remainder, > + int *remainder_size, > + int max_recs > + ) > +{ > + int n = 0; > + int res = 0; > + > + if (*remainder_size > 0) { > + memmove( > + (char *) request_buffer, > + (char *) remainder, *remainder_size > + ); > + n = *remainder_size; > + } > + > + res = os_read_file( > + fd, > + ((char *) request_buffer) + *remainder_size, > + sizeof(struct io_thread_req *)*max_recs > + - *remainder_size > > then here it would be > res = os_read_file(fd, buf+*offset,len-*offset) > Note that if this is ever multithreaded buf and offset or request_buffer/remainder/remainder_size need to be kept separate This is executed only in the single IO thread. > > + ); > + if (res > 0) { > + n += res; > + if ((n % sizeof(struct io_thread_req *)) > 0) { > + /* > + * Read somehow returned not a multiple of dword > + * theoretically possible, but never observed in the > + * wild, so read routine must be able to handle it > + */ > > maybe this should have a WARN_ON since it should never occur? Fair point. 
> > + *remainder_size = n % sizeof(struct io_thread_req *); > + memmove( > + remainder, > + ((char *) request_buffer) + > + n/sizeof(struct io_thread_req *), > + *remainder_size > > I am pretty sure the 2nd parameter is off. I think it should be: > ((char *) request_buffer) +(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *) > also copying it off to a shared static buffer? Well spotted. > > + ); > + n = n - *remainder_size; > + } > + } else { > + n = res; > + } > + return n; > +} > + > /* Called without dev->lock held, and only in interrupt context. */ > static void ubd_handler(void) > { > - struct io_thread_req *req; > struct ubd *ubd; > struct list_head *list, *next_ele; > unsigned long flags; > int n; > + int count; > > while(1){ > - n = os_read_file(thread_fd, &req, > - sizeof(struct io_thread_req *)); > - if(n != sizeof(req)){ > > if it is being rewritten n could just be the number of complete buffers for the for loop below That will make it more readable. Agree. > > + n = bulk_req_safe_read( > + thread_fd, > + irq_req_buffer, > + &irq_remainder, > + &irq_remainder_size, > + UBD_REQ_BUFFER_SIZE > + ); > + if (n < 0) { > if(n == -EAGAIN) > break; > printk(KERN_ERR "spurious interrupt in ubd_handler, " > "err = %d\n", -n); > return; > } > - > - blk_end_request(req->req, 0, req->length); > - kfree(req); > + for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { > + blk_end_request( > + (*irq_req_buffer)[count]->req, > + 0, > + (*irq_req_buffer)[count]->length > + ); > + kfree((*irq_req_buffer)[count]); > + } > } > reactivate_fd(thread_fd, UBD_IRQ); > > @@ -1064,6 +1137,28 @@ static int __init ubd_init(void) > if (register_blkdev(fake_major, "ubd")) > return -1; > } > + > + irq_req_buffer = kmalloc( > + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, > + GFP_KERNEL > + ); > + irq_remainder = 0; > + > + if (irq_req_buffer == NULL) { > + printk(KERN_ERR "Failed to initialize ubd buffering\n"); > + return -1; > + } > > Ok, I am 
really not tracking this, the same buffer allocated twice? One for the irq handler called irq_req_buffer - above. One for the io thread called io_req_buffer - below. Both reuse the same function 100% - this is why it is written this way using two params for "remainder/offset" instead of a single offset. If I use just the offset I cannot reuse the code 100% in both sides of the io (the irq and the io thread). > > + io_req_buffer = kmalloc( > + sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE, > + GFP_KERNEL > + ); > + > + io_remainder = 0; > + > + if (io_req_buffer == NULL) { > + printk(KERN_ERR "Failed to initialize ubd buffering\n"); > + return -1; > + } > platform_driver_register(&ubd_driver); > mutex_lock(&ubd_lock); > for (i = 0; i < MAX_DEV; i++){ > > > > Sorry of the poor email I am using a web interface and it keeps dropping chars... No worries. A. > > Jim |
From: Richard W. <ri...@no...> - 2016-11-07 22:30:37
|
Anton, On 30.09.2016 08:53, ant...@ko... wrote: > From: Anton Ivanov <ai...@ko...> > > UBD at present is extremely slow because it handles only > one request at a time in the IO thread and IRQ handler. > > The single request at a time is replaced by handling multiple > requests as well as necessary workarounds for short reads/writes. > > Resulting performance improvement in disk IO - 30% > > Signed-off-by: Anton Ivanov <ai...@ko...> In general I think we can merge this in 4.10. But please run checkpatch.pl and fix all the whitespace issues. :-) Daniel, can you apply your blk-mq work on top of this patch? Thanks, //richard |
From: Anton I. <ant...@ko...> - 2016-11-08 09:03:15
|
I will sort out the whitespace and resubmit tonight. Tomorrow morning latest. Apologies (I thought I had the environment set-up so it does not produce whitespace issues). Brgds, A. On 07/11/16 22:30, Richard Weinberger wrote: > Anton, > > On 30.09.2016 08:53, ant...@ko... wrote: >> From: Anton Ivanov <ai...@ko...> >> >> UBD at present is extremely slow because it handles only >> one request at a time in the IO thread and IRQ handler. >> >> The single request at a time is replaced by handling multiple >> requests as well as necessary workarounds for short reads/writes. >> >> Resulting performance improvement in disk IO - 30% >> >> Signed-off-by: Anton Ivanov <ai...@ko...> > In general I think we can merge this in 4.10. > But please run checkpatch.pl and fix all the whitespace issues. :-) > > Daniel, can you apply your blk-mq work on top of this patch? > > Thanks, > //richard > |