diff --git a/kern/arch/x86/trap.c b/kern/arch/x86/trap.c index 83ad3532ed..0f921cf1ec 100644 --- a/kern/arch/x86/trap.c +++ b/kern/arch/x86/trap.c @@ -543,17 +543,30 @@ void handle_nmi(struct hw_trapframe *hw_tf) static void trap_dispatch(struct hw_trapframe *hw_tf) { struct per_cpu_info *pcpui; + struct preempt_data *vcpd; + struct proc *p; bool handled = FALSE; unsigned long aux = 0; uintptr_t fixup_ip; // Handle processor exceptions. switch(hw_tf->tf_trapno) { + case T_DEBUG: case T_BRKPT: - enable_irq(); - monitor(hw_tf); - disable_irq(); - handled = TRUE; + pcpui = &per_cpu_info[core_id()]; + p = pcpui->cur_proc; + vcpd = &p->procdata->vcore_preempt_data[pcpui->owning_vcoreid]; + + if (in_kernel(hw_tf) || !proc_is_vcctx_ready(p) || + vcpd->notif_disabled) { + /* Trap is in kernel, vcore, or early SCP context. */ + enable_irq(); + monitor(hw_tf); + disable_irq(); + handled = TRUE; + } else { + handled = FALSE; + } break; case T_ILLOP: { diff --git a/tests/block_test.c b/tests/block_test.c index b25dd162e3..63c6ad2e0e 100644 --- a/tests/block_test.c +++ b/tests/block_test.c @@ -1,9 +1,10 @@ -#include +#include #include +#include #include -#include -#include #include +#include +#include "misc-compat.h" pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; #define printf_safe(...) 
{} @@ -20,16 +21,16 @@ void *my_retvals[NUM_TEST_THREADS]; __thread int my_id; void *block_thread(void* arg) -{ +{ assert(!in_vcore_context()); for (int i = 0; i < NUM_TEST_LOOPS; i++) { - printf_safe("[A] pthread %d on vcore %d\n", pthread_self()->id, vcore_id()); - sys_block(5000 + pthread_self()->id); + printf_safe("[A] pthread %d on vcore %d\n", pthread_id(), vcore_id()); + sys_block(5000 + pthread_id()); } - return (void*)(long)pthread_self()->id; + return (void*)(long)pthread_id(); } -int main(int argc, char** argv) +int main(int argc, char **argv) { struct timeval tv = {0}; if (gettimeofday(&tv, 0)) diff --git a/tests/futex_timeout.c b/tests/futex_timeout.c index 4f64fec133..1e1f56d133 100644 --- a/tests/futex_timeout.c +++ b/tests/futex_timeout.c @@ -1,15 +1,16 @@ -#include -#include +#include #include #include -#include #include +#include +#include +#include "misc-compat.h" #define NUM_THREADS 10 pthread_t thandlers[NUM_THREADS]; void *handler(void *arg) { - int id = pthread_self()->id; + int id = pthread_id(); int var = 0; struct timespec timeout = { .tv_sec = id, diff --git a/tests/misc-compat.h b/tests/misc-compat.h index fcb26c6036..40e894c6b3 100644 --- a/tests/misc-compat.h +++ b/tests/misc-compat.h @@ -4,7 +4,7 @@ #include -#define pthread_id() (pthread_self()->id) +#define pthread_id() (pthread_self()->uthread.id) #else diff --git a/tests/pthread_barrier_test.c b/tests/pthread_barrier_test.c index 76abc045bc..853ccd6b42 100644 --- a/tests/pthread_barrier_test.c +++ b/tests/pthread_barrier_test.c @@ -1,9 +1,10 @@ -#include +#include #include #include -#include -#include +#include #include +#include +#include "misc-compat.h" pthread_barrier_t barrier; @@ -17,16 +18,16 @@ void **my_retvals; bool run_barriertest = FALSE; void *thread(void *arg) -{ +{ while (!run_barriertest) cpu_relax(); for(int i = 0; i < nr_loops; i++) { pthread_barrier_wait(&barrier); } - return (void*)(long)pthread_self()->id; + return (void*)(long)pthread_id(); } -int main(int 
argc, char** argv) +int main(int argc, char **argv) { struct timeval start_tv = {0}; struct timeval end_tv = {0}; diff --git a/user/parlib/debug.c b/user/parlib/debug.c index 46a3e4ed72..f927668fe1 100644 --- a/user/parlib/debug.c +++ b/user/parlib/debug.c @@ -1,9 +1,20 @@ +#include +#include +#include +#include #include +#include +#include #include -#include -#include +#include #include +#include #include +#include +#include +#include +#include +#include int akaros_printf(const char *format, ...) { @@ -18,7 +29,7 @@ int akaros_printf(const char *format, ...) /* Poor man's Ftrace, won't work well with concurrency. */ static const char *blacklist[] = { - "whatever", + "whatever", }; static bool is_blacklisted(const char *s) @@ -53,7 +64,7 @@ void __print_func_entry(const char *func, const char *file) if (is_blacklisted(func)) return; spinlock_lock(&lock); - printd("Vcore %2d", vcore_id()); /* helps with multicore output */ + printd("Vcore %2d", vcore_id()); /* helps with multicore output */ for (int i = 0; i < tab_depth; i++) printf("\t"); printf("%s() in %s\n", func, file); @@ -75,3 +86,767 @@ void __print_func_exit(const char *func, const char *file) printf("---- %s()\n", func); spinlock_unlock(&lock); } + +static void handle_debug_msg(struct syscall *sysc); +static void debug_read(int fd, void *buf, size_t len); +static int handle_one_msg(struct event_mbox *ev_mbox); +static void debug_read_handler(struct event_queue *ev_q); + +static int debug_send_and_block(int fd, struct d9_header *hdr, + int (*fn)(struct d9_header *msg, void *arg), + void *arg); +static int debug_send_packet(int fd, struct d9_header *hdr); +static int debug_send_error(uint32_t errnum); +static struct d9_header *debug_read_packet(int fd); +static int read_all(int fd, void *data, size_t size); +static int check_error_packet(struct d9_header *hdr, enum d9_msg_t expected); + +static int d9s_treadmem(struct d9_header *hdr); +static int d9s_tstoremem(struct d9_header *hdr); +static int 
d9s_tfetchreg(struct d9_header *hdr); +static int d9s_tstorereg(struct d9_header *hdr); +static int d9s_tresume(struct d9_header *hdr); +static int d9s_tinit(struct d9_header *hdr); + +static int d9c_thitbreakpoint(struct d9_header *hdr); +static int d9c_taddthread(struct d9_header *hdr); + +/* ev_q for read syscall on debug pipe. */ +static struct event_queue *debug_read_ev_q; +static struct syscall debug_read_sysc; +static int debug_fd = -1; +static atomic_t debugged; +static atomic_t last_breakpoint_tid; + +/* async_read_buf is the buffer used to read the header of a packet. + * + * We can only have one of these reads at a time anyway. */ +static char async_read_buf[sizeof(struct d9_header)]; + +/* d9_ops are user-supplied routines that fill in the response packet with the + * appropriate information and/or do the requested operation. These may block. + */ +static struct d9_ops *d9_ops; + +/* A message handler is an internal routine that allocates the appropriate + * response packet and coordinates sending the appropriate response to + * gdbserver. */ +typedef int (*message_handler)(struct d9_header *hdr); +#define D9_HANDLER(x) (x - D9_TREADMEM) + +static message_handler srv_msg_handlers[D9_HANDLER(D9_NUM_MESSAGES)] = { + d9s_treadmem, /* TREADMEM */ + NULL, /* RREADMEM */ + d9s_tstoremem, /* TSTOREMEM */ + NULL, /* RSTOREMEM */ + d9s_tfetchreg, /* TFETCHREG */ + NULL, /* RFETCHREG */ + d9s_tstorereg, /* TSTOREREG */ + NULL, /* RSTOREREG */ + NULL, /* TERROR */ + NULL, /* RERROR */ + NULL, /* THITBREAKPOINT */ + NULL, /* RHITBREAKPOINT */ + d9s_tresume, /* TRESUME */ + NULL, /* RRESUME */ + NULL, /* TADDTHREAD */ + NULL, /* RADDTHREAD */ + d9s_tinit, /* TINIT */ + NULL, /* RINIT */ +}; + +/* gdbserver can also receive messages it didn't ask for, e.g. TADDTHREAD. The + * handler lives here. 
*/ +static message_handler clt_msg_handlers[D9_HANDLER(D9_NUM_MESSAGES)] = { + NULL, /* TREADMEM */ + NULL, /* RREADMEM */ + NULL, /* TSTOREMEM */ + NULL, /* RSTOREMEM */ + NULL, /* TFETCHREG */ + NULL, /* RFETCHREG */ + NULL, /* TSTOREREG */ + NULL, /* RSTOREREG */ + NULL, /* TERROR */ + NULL, /* RERROR */ + d9c_thitbreakpoint, /* THITBREAKPOINT */ + NULL, /* RHITBREAKPOINT */ + NULL, /* TRESUME */ + NULL, /* RRESUME */ + d9c_taddthread, /* TADDTHREAD */ + NULL, /* RADDTHREAD */ + NULL, /* TINIT */ + NULL, /* RINIT */ +}; + +/* queue_handle_one_message extracts one message from the mbox and calls the + * appropriate handler for that message. */ +static int queue_handle_one_msg(struct event_mbox *ev_mbox) +{ + struct event_msg msg; + struct syscall *sysc; + + if (!extract_one_mbox_msg(ev_mbox, &msg)) + return 0; + + assert(msg.ev_type == EV_SYSCALL); + sysc = msg.ev_arg3; + assert(sysc); + handle_debug_msg(sysc); + return 1; +} + +/* queue_read_handler is the event queue handler for the async read evq. */ +static void queue_read_handler(struct event_queue *ev_q) +{ + assert(ev_q); + assert(ev_q->ev_mbox); + + while (queue_handle_one_msg(ev_q->ev_mbox)) + ; +} + +void d9s_init(struct d9_ops *dops) +{ + int fd; + int p[2]; + char buf[60]; + int ret; + + /* Set up parlib queue for asynchronous read notifications. */ + debug_read_ev_q = get_eventq(EV_MBOX_UCQ); + debug_read_ev_q->ev_flags = + EVENT_IPI | EVENT_INDIR | EVENT_SPAM_INDIR | EVENT_WAKEUP; + debug_read_ev_q->ev_handler = queue_read_handler; + + /* Set d9 ops. */ + d9_ops = dops; + + /* Open a pipe and post it in #srv. + * TODO(chrisko): add special file #proc/PID/debug that works like #srv. 
+ */ + ret = pipe(p); + if (ret < 0) + panic("could not get pipe."); + + snprintf(buf, sizeof(buf), "#srv/debug-%d", getpid()); + fd = open(buf, O_WRONLY | O_CREAT, 0666); + if (fd < 0) + panic("could not open debug file."); + + snprintf(buf, sizeof(buf), "%d", p[1]); + if (write(fd, buf, strlen(buf)) != strlen(buf)) + panic("could not write fd to debug file."); + + close(p[1]); + + debug_fd = p[0]; + debug_read(debug_fd, async_read_buf, sizeof(struct d9_header)); +} + +static void debug_read(int fd, void *buf, size_t len) +{ + memset(&debug_read_sysc, 0, sizeof(struct syscall)); + syscall_async(&debug_read_sysc, SYS_read, fd, buf, len); + + if (!register_evq(&debug_read_sysc, debug_read_ev_q)) + handle_debug_msg(&debug_read_sysc); +} + +static void handle_debug_msg(struct syscall *sysc) +{ + struct d9_header *hdr, *thdr; + + switch (sysc->retval) { + case 0: + panic("read syscall: got 0 bytes."); + break; + case -1: + panic("read failed"); + break; + default: + if (sysc->retval != sizeof(struct d9_header)) + panic("2LS debug: should have received D9 header."); + + thdr = (struct d9_header *)sysc->arg1; + + /* Allocate a continuous chunk of the memory for the message. */ + hdr = calloc(thdr->size, 1); + if (hdr == NULL) + panic("handle_debug_msg: calloc failed."); + + /* Copy header over. */ + memcpy(hdr, thdr, sizeof(struct d9_header)); + + /* Read the remaining bytes of the message. */ + size_t msg_size = hdr->size - sizeof(struct d9_header); + if (msg_size > 0 && read_all(debug_fd, hdr + 1, msg_size)) + panic("handle_debug_msg: read_all failed."); + + /* Call the appropriate handler for this packet. */ + if (srv_msg_handlers[D9_HANDLER(hdr->msg_type)] != NULL) + srv_msg_handlers[D9_HANDLER(hdr->msg_type)](hdr); + else + panic("2LS debug: no message handler found."); + + free(hdr); + debug_read(debug_fd, async_read_buf, sizeof(struct d9_header)); + } +} + +/* alloc_packet allocates memory for a packet of given type. 
*/ +static struct d9_header *alloc_packet(size_t pck_len, enum d9_msg_t msg_type) +{ + struct d9_header *hdr; + + if (pck_len >= UINT32_MAX) + panic("2LS debug: packet too long."); + + hdr = calloc(1, pck_len); + if (hdr == NULL) + return NULL; + + hdr->size = pck_len; + hdr->msg_type = msg_type; + return hdr; +} + +/* d9s_read_memory is the d9_ops func called to fulfill a TREADMEM request. */ +int d9s_read_memory(const struct d9_treadmem_msg *req, + struct d9_rreadmem_msg *resp) +{ + resp->length = req->length; + /* TODO(chrisko): can check page tables to see whether this should actually + * succeed instead of letting it fault. */ + memcpy(resp->data, (void *)req->address, req->length); + return 0; +} + +/* d9s_store_memory is the d9_ops func called to fulfill a TSTOREMEM request. */ +int d9s_store_memory(const struct d9_tstoremem_msg *req) +{ + /* TODO(chrisko): can check page tables to see whether this should actually + * succeed instead of letting it fault. */ + memcpy((void *)req->address, req->data, req->length); + return 0; +} + +/* d9s_resume is the d9_ops func called to fulfill a TRESUME request. */ +void d9s_resume(struct uthread *t, bool singlestep) +{ + if (t) { + /* Only single-step if a specific thread was specified. 
*/ + if (singlestep) + uthread_enable_single_step(t); + else + uthread_disable_single_step(t); + uthread_runnable(t); + } else { + uthread_apply_all(uthread_runnable); + } +} + +int d9s_notify_hit_breakpoint(uint64_t tid, uint64_t address) +{ + struct d9_thitbreakpoint thb = + D9_INIT_HDR(sizeof(struct d9_thitbreakpoint), D9_THITBREAKPOINT); + + atomic_swap(&last_breakpoint_tid, tid); + + if (!atomic_read(&debugged)) + return 0; + + thb.msg.pid = getpid(); + thb.msg.tid = tid; + thb.msg.address = address; + + return debug_send_packet(debug_fd, &(thb.hdr)); +} + +int d9s_notify_add_thread(uint64_t tid) +{ + struct d9_taddthread tat = + D9_INIT_HDR(sizeof(struct d9_taddthread), D9_TADDTHREAD); + + if (!atomic_read(&debugged)) + return 0; + + tat.msg.pid = getpid(); + tat.msg.tid = tid; + + return debug_send_packet(debug_fd, &(tat.hdr)); +} + +void notify_thread(struct uthread *t) +{ + (void)d9s_notify_add_thread(t->id); +} + +static int d9s_tinit(struct d9_header *hdr) +{ + uint64_t tid; + struct uthread *t; + struct d9_rinit resp = D9_INIT_HDR(sizeof(struct d9_rinit), D9_RINIT); + + if (!atomic_cas(&debugged, 0, 1)) + return debug_send_error(EBADF /* TODO */); + + uthread_apply_all(notify_thread); + + if ((tid = atomic_read(&last_breakpoint_tid)) > 0) { + t = uthread_get_thread_by_id(tid); + d9s_notify_hit_breakpoint(tid, get_user_ctx_pc(&t->u_ctx)); + uthread_put_thread(t); + } + + return debug_send_packet(debug_fd, &(resp.hdr)); +} + +/* d9s_tresume resumes execution in all threads. + * + * This looks a bit different than all the other routines: The actual op is done + * after sending a successful response. The response basically serves to let the + * client know that the message was received, but not that the work was done. + * + * There's two scenarios for resume we care about at the moment: + * 1) We resume and run until the program hits another breakpoint. + * 2) We resume and run until the program exits. 
+ * + * In the second case, the program could exit before the 2LS has a chance to + * send the successful response. So we send the response first and assume that + * resume cannot fail. (If it does, it wouldn't fail in a way we can currently + * detect anyway.) + */ +static int d9s_tresume(struct d9_header *hdr) +{ + int ret; + struct uthread *t = NULL; + struct d9_rresume resp = D9_INIT_HDR(sizeof(struct d9_rresume), D9_RRESUME); + struct d9_tresume *req = (struct d9_tresume *)hdr; + + if (d9_ops == NULL || d9_ops->resume == NULL) + return debug_send_error(EBADF /* TODO: better error code */); + + if (req->msg.tid > 0) { + /* Find the appropriate thread. */ + t = uthread_get_thread_by_id(req->msg.tid); + if (t == NULL) + return debug_send_error(EBADF /* TODO */); + } + + ret = debug_send_packet(debug_fd, &(resp.hdr)); + + /* Call user-supplied routine. */ + d9_ops->resume(t, req->msg.singlestep); + + return ret; +} + +/* d9s_tstoremem allocates the response packet, calls the user-supplied ops + * function for storing memory, and sends the response packet. */ +static int d9s_tstoremem(struct d9_header *hdr) +{ + int ret; + struct d9_rstoremem resp = + D9_INIT_HDR(sizeof(struct d9_rstoremem), D9_RSTOREMEM); + struct d9_tstoremem *req = (struct d9_tstoremem *)hdr; + + if (d9_ops == NULL || d9_ops->store_memory == NULL) + return debug_send_error(EBADF /* TODO: better error code */); + + /* Call user-supplied routine for filling in response packet. */ + ret = d9_ops->store_memory(&(req->msg)); + + if (ret < 0) + return debug_send_error(-ret); + + return debug_send_packet(debug_fd, &(resp.hdr)); +} + +/* d9s_treadmem allocates the response packet, calls the user-supplied ops + * function for reading from memory, and sends the response packet. 
*/ +static int d9s_treadmem(struct d9_header *hdr) +{ + int ret; + struct d9_rreadmem *resp; + struct d9_treadmem *req = (struct d9_treadmem *)hdr; + struct d9_header *rhdr = + alloc_packet(sizeof(struct d9_rreadmem) + req->msg.length, D9_RREADMEM); + if (rhdr == NULL) + return debug_send_error(ENOMEM /* TODO */); + + if (d9_ops == NULL || d9_ops->read_memory == NULL) + return debug_send_error(EBADF /* TODO */); + + resp = (struct d9_rreadmem *)rhdr; + + /* Call user-supplied routine for filling in response packet. */ + ret = d9_ops->read_memory(&(req->msg), &(resp->msg)); + + if (ret < 0) { + free(rhdr); + return debug_send_error(-ret); + } + + ret = debug_send_packet(debug_fd, rhdr); + free(rhdr); + return ret; +} + +/* d9s_tstorereg allocates the response packet, finds the appropriate thread + * structure, calls the user-supplied ops function for storing its registers, + * and sends the response packet. */ +static int d9s_tstorereg(struct d9_header *hdr) +{ + int ret; + struct uthread *t; + struct d9_header *rpack; + struct d9_rstorereg resp = + D9_INIT_HDR(sizeof(struct d9_rstorereg), D9_RSTOREREG); + struct d9_tstorereg *req = (struct d9_tstorereg *)hdr; + + if (d9_ops == NULL || d9_ops->store_registers == NULL) + return debug_send_error(EBADF /* TODO */); + + /* Find the appropriate thread. */ + t = uthread_get_thread_by_id(req->msg.threadid); + if (t == NULL) + return debug_send_error(EBADF /* TODO */); + + /* Call user-supplied routine for filling in response packet. */ + ret = d9_ops->store_registers(t, &(req->msg.regs)); + + if (ret < 0) + return debug_send_error(-ret); + + /* Successful response. */ + return debug_send_packet(debug_fd, &(resp.hdr)); +} + +/* d9s_tfetchreg allocates the response packet, finds the appropriate thread + * structure, calls the user-supplied ops function for reading its registers, + * and sends the response packet. 
*/ +static int d9s_tfetchreg(struct d9_header *hdr) +{ + int ret; + struct uthread *t; + struct d9_rfetchreg resp = + D9_INIT_HDR(sizeof(struct d9_rfetchreg), D9_RFETCHREG); + struct d9_tfetchreg *req = (struct d9_tfetchreg *)hdr; + + if (d9_ops == NULL || d9_ops->fetch_registers == NULL) + return debug_send_error(EBADF /* TODO */); + + /* Find the appropriate thread. */ + t = uthread_get_thread_by_id(req->msg.threadid); + if (t == NULL) + return debug_send_error(EBADF /* TODO */); + + /* Call user-supplied routine for filling in response packet. */ + ret = d9_ops->fetch_registers(t, &(resp.msg.regs)); + + if (ret < 0) + return debug_send_error(-ret); + + return debug_send_packet(debug_fd, &(resp.hdr)); +} + +/* debug_send_error sends an error response to gdbserver. */ +static int debug_send_error(uint32_t errnum) +{ + struct d9_rerror rerror = D9_INIT_HDR(sizeof(struct d9_rerror), D9_RERROR); + + rerror.msg.errnum = errnum; + + return debug_send_packet(debug_fd, &(rerror.hdr)); +} + +static int debug_send_packet(int fd, struct d9_header *hdr) +{ + ssize_t wlen, total = 0; + + while (total < hdr->size) { + wlen = write(fd, (uint8_t *)hdr + total, hdr->size - total); + if (wlen < 0) { + if (errno == EINTR) + continue; + else + return -1; + } + total += wlen; + } + return 0; +} + +/* read_all will keep calling read until `size` bytes have been read or an error + * occurs. */ +static int read_all(int fd, void *data, size_t size) +{ + ssize_t rlen, len_read = 0; + + while (len_read < size) { + rlen = read(fd, (uint8_t *)data + len_read, size - len_read); + if (rlen < 0) { + if (errno == EINTR) + continue; + else + return -1; + } + len_read += rlen; + } + return 0; +} + +/* debug_read_packet will read a packet in its exact length. */ +static struct d9_header *debug_read_packet(int fd) +{ + size_t msg_size; + struct d9_header *hdr = malloc(sizeof(struct d9_header)); + + if (hdr == NULL) + panic("2LS debug: could not malloc"); + + /* Read message header. 
*/ + if (read_all(fd, hdr, sizeof(struct d9_header))) { + free(hdr); + return NULL; + } + + /* Read the remaining bytes of the message. */ + msg_size = hdr->size - sizeof(struct d9_header); + if (msg_size > 0) { + hdr = realloc(hdr, hdr->size); + if (hdr == NULL) + panic("2LS debug: could not realloc"); + + if (read_all(fd, hdr + 1, msg_size)) { + perror("d9 read"); + free(hdr); + return NULL; + } + } + + return hdr; +} + +/* Globals to deal with incoming messages on gdbserver side. + * + * TODO(chrisko): Instead of making these global, make a struct to pass to + * read_thread and the d9c_ functions. + */ +static struct d9_header *d9c_message; +static uth_mutex_t sync_lock; +static uth_cond_var_t sync_cv; + +void *d9c_read_thread(void *arg) +{ + struct d9_header *hdr; + int fd = *((int *)arg); + ssize_t rlen = 0; + + while (1) { + hdr = debug_read_packet(fd); + if (hdr == NULL) + return NULL; + + if (IS_MSG_R(hdr->msg_type)) { + /* If this is a response message type, the main gdbserver thread is + * blocked on this response. */ + uth_mutex_lock(sync_lock); + d9c_message = hdr; + uth_mutex_unlock(sync_lock); + uth_cond_var_broadcast(sync_cv); + } else if (clt_msg_handlers[D9_HANDLER(hdr->msg_type)]) { + /* This is a message that isn't a response to a request (e.g. a + * thread was added or we hit a breakpoint). */ + clt_msg_handlers[D9_HANDLER(hdr->msg_type)](hdr); + free(hdr); + } else { + panic("2LS received invalid message type (type = %d, size = %d)\n", + hdr->msg_type, hdr->size); + free(hdr); + } + } +} + +static struct d9c_ops *client_ops; + +/* d9c_thitbreakpoint is called when the 2LS sends a THITBREAKPOINT msg. */ +int d9c_thitbreakpoint(struct d9_header *hdr) +{ + struct d9_thitbreakpoint *thb = (struct d9_thitbreakpoint *)hdr; + + return client_ops->hit_breakpoint(thb->msg.pid, thb->msg.tid, + thb->msg.address); +} + +/* d9c_taddthread is called when the 2LS sends a TADDTHREAD msg. 
*/ +int d9c_taddthread(struct d9_header *hdr) +{ + struct d9_taddthread *tat = (struct d9_taddthread *)hdr; + + return client_ops->add_thread(tat->msg.pid, tat->msg.tid); +} + +static int check_error_packet(struct d9_header *hdr, enum d9_msg_t expected) +{ + struct d9_rerror *rerror; + + /* Msg is of expected type -- no error. */ + if (hdr->msg_type == expected) + return 0; + + if (hdr->msg_type == D9_RERROR) { + /* Got error message. */ + rerror = (struct d9_rerror *)hdr; + errno = rerror->msg.errnum; + } else { + /* Neither got an error message nor the expected message. */ + errno = EIO; + } + return 1; +} + +static int debug_send_and_block(int fd, struct d9_header *hdr, + int (*fn)(struct d9_header *msg, void *arg), + void *arg) +{ + int ret; + + uth_mutex_lock(sync_lock); + ret = debug_send_packet(fd, hdr); + if (ret) + goto send_unlock; + + /* Wait for response message. */ + while (d9c_message == NULL) + uth_cond_var_wait(sync_cv, sync_lock); + + if (check_error_packet(d9c_message, hdr->msg_type + 1)) { + perror("d9 send and block"); + ret = -1; + } else { + ret = fn ? fn(d9c_message, arg) : 0; + } + + free(d9c_message); + d9c_message = NULL; + +send_unlock: + uth_mutex_unlock(sync_lock); + return ret; +} + +/* d9c_store_memory communicates with the 2LS to store from an address in + * memory. 
*/ +int d9c_store_memory(int fd, uintptr_t address, const void *const data, + uint32_t length) +{ + int ret; + struct d9_header *rhdr; + struct d9_tstoremem *req; + + rhdr = alloc_packet(sizeof(struct d9_tstoremem) + length, D9_TSTOREMEM); + if (rhdr == NULL) + return -1; + + req = (struct d9_tstoremem *)rhdr; + req->msg.address = address; + req->msg.length = length; + memcpy(&(req->msg.data), data, length); + + ret = debug_send_and_block(fd, rhdr, NULL, NULL); + free(rhdr); + return ret; +} + +static int d9c_read_memory_callback(struct d9_header *msg, void *arg) +{ + struct d9_rreadmem *resp = (struct d9_rreadmem *)msg; + + memcpy(arg, resp->msg.data, resp->msg.length); + return 0; +} + +/* d9c_read_memory communicates with the 2LS to read from an address in memory. +*/ +int d9c_read_memory(int fd, uintptr_t address, uint32_t length, uint8_t *buf) +{ + struct d9_treadmem req = + D9_INIT_HDR(sizeof(struct d9_treadmem), D9_TREADMEM); + struct d9_rreadmem *rhdr; + + req.msg.address = address; + req.msg.length = length; + + return debug_send_and_block(fd, &(req.hdr), &d9c_read_memory_callback, buf); +} + +/* d9c_store_registers communicates with the 2LS to change register values. */ +int d9c_store_registers(int fd, uint64_t tid, struct d9_regs *regs) +{ + struct d9_tstorereg req = + D9_INIT_HDR(sizeof(struct d9_tstorereg), D9_TSTOREREG); + + req.msg.threadid = tid; + memcpy(&(req.msg.regs), regs, sizeof(struct d9_regs)); + + return debug_send_and_block(fd, &(req.hdr), NULL, NULL); +} + +static int d9c_fetch_registers_callback(struct d9_header *msg, void *arg) +{ + /* Store registers in pointer given by user of d9c_fetch_registers. */ + struct d9_rfetchreg *resp = (struct d9_rfetchreg *)msg; + + memcpy(arg, &(resp->msg.regs), sizeof(struct d9_regs)); + return 0; +} + +/* d9c_fetch_registers communicates with the 2LS to read register values. 
*/ +int d9c_fetch_registers(int fd, uint64_t tid, struct d9_regs *regs) +{ + struct d9_tfetchreg req = + D9_INIT_HDR(sizeof(struct d9_tfetchreg), D9_TFETCHREG); + + req.msg.threadid = tid; + + return debug_send_and_block(fd, &(req.hdr), &d9c_fetch_registers_callback, + regs); +} + +/* d9c_resume tells the 2LS to resume all threads. */ +int d9c_resume(int fd, uint64_t tid, bool singlestep) +{ + struct d9_tresume req = D9_INIT_HDR(sizeof(struct d9_tresume), D9_TRESUME); + + req.msg.tid = tid; + req.msg.singlestep = singlestep; + + return debug_send_and_block(fd, &(req.hdr), NULL, NULL); +} + +/* d9c_attach opens the connection to the process. */ +int d9c_attach(unsigned long pid) +{ + char buf[60]; + int debug_fd; + + /* TODO(chrisko): #proc/pid/debug */ + snprintf(buf, sizeof(buf), "#srv/debug-%lu", pid); + /* Just retry that for now. */ + while ((debug_fd = open(buf, O_RDWR)) == -1) + sys_block(100); + + return debug_fd; +} + +int d9c_init(int fd, struct d9c_ops *ops) +{ + struct d9_tinit req = D9_INIT_HDR(sizeof(struct d9_tinit), D9_TINIT); + + client_ops = ops; + sync_lock = uth_mutex_alloc(); + sync_cv = uth_cond_var_alloc(); + + return debug_send_and_block(fd, &(req.hdr), NULL, NULL); +} diff --git a/user/parlib/include/parlib/debug.h b/user/parlib/include/parlib/debug.h new file mode 100644 index 0000000000..af360b847d --- /dev/null +++ b/user/parlib/include/parlib/debug.h @@ -0,0 +1,228 @@ +#pragma once + +#include +#include + +/* Message types of D9. + * T messages have to be even, R messages have to be odd. T messages are sent + * from gdbserver to 2LS, R messages the other way. + * + * If you modify this, also make sure that srv_msg_handlers and clt_msg_handlers + * are still correct in debug.c. + */ +enum d9_msg_t { + D9_TREADMEM = 10, + D9_RREADMEM, + D9_TSTOREMEM, + D9_RSTOREMEM, + D9_TFETCHREG, + D9_RFETCHREG, + D9_TSTOREREG, + D9_RSTOREREG, + D9_TERROR, /* Do not use. 
*/ + D9_RERROR, + D9_THITBREAKPOINT, + D9_RHITBREAKPOINT, + D9_TRESUME, + D9_RRESUME, + D9_TADDTHREAD, + D9_RADDTHREAD, + D9_TINIT, + D9_RINIT, + D9_NUM_MESSAGES, /* Do not use. */ +}; + +#define IS_MSG_T(type) ((type) % 2 == 0) +#define IS_MSG_R(type) ((type) % 2 == 1) + +struct d9_header { + uint32_t size; + uint32_t msg_type; +} __attribute__((packed)); + +/* Initialization message. */ +struct d9_tinit { + struct d9_header hdr; +} __attribute__((packed)); + +struct d9_rinit { + struct d9_header hdr; +} __attribute__((packed)); + +/* Error message */ +struct d9_rerror_msg { + uint32_t errnum; +} __attribute__((packed)); + +struct d9_rerror { + struct d9_header hdr; + struct d9_rerror_msg msg; +} __attribute__((packed)); + +/* reading memory */ +struct d9_treadmem_msg { + uintptr_t address; + uint32_t length; +} __attribute__((packed)); + +struct d9_treadmem { + struct d9_header hdr; + struct d9_treadmem_msg msg; +} __attribute__((packed)); + +struct d9_rreadmem_msg { + uint32_t length; + uint8_t data[]; /* Variable length; must be the last member. */ +} __attribute__((packed)); + +struct d9_rreadmem { + struct d9_header hdr; + struct d9_rreadmem_msg msg; +} __attribute__((packed)); + +/* storing memory */ +struct d9_tstoremem_msg { + uintptr_t address; + uint32_t length; + uint8_t data[]; /* Variable length; must be the last member. */ +} __attribute__((packed)); + +struct d9_tstoremem { + struct d9_header hdr; + struct d9_tstoremem_msg msg; +} __attribute__((packed)); + +struct d9_rstoremem { + struct d9_header hdr; +} __attribute__((packed)); + +/* fetching registers */ +struct d9_tfetchreg_msg { + uint64_t threadid; +} __attribute__((packed)); + +struct d9_tfetchreg { + struct d9_header hdr; + struct d9_tfetchreg_msg msg; +} __attribute__((packed)); + +struct d9_rfetchreg_msg { + struct d9_regs regs; /* Architecture-dependent. 
*/ +} __attribute__((packed)); + +struct d9_rfetchreg { + struct d9_header hdr; + struct d9_rfetchreg_msg msg; +} __attribute__((packed)); + +/* storing registers */ +struct d9_tstorereg_msg { + uint64_t threadid; + struct d9_regs regs; +} __attribute__((packed)); + +struct d9_tstorereg { + struct d9_header hdr; + struct d9_tstorereg_msg msg; +} __attribute__((packed)); + +struct d9_rstorereg { + struct d9_header hdr; +} __attribute__((packed)); + +/* resuming */ +struct d9_tresume_msg { + uint64_t tid; + bool singlestep : 1; +} __attribute__((packed)); + +struct d9_tresume { + struct d9_header hdr; + struct d9_tresume_msg msg; +} __attribute__((packed)); + +struct d9_rresume { + struct d9_header hdr; +} __attribute__((packed)); + +/* hitting a breakpoint */ +struct d9_thitbreakpoint_msg { + pid_t pid; + uint64_t tid; + uint64_t address; +} __attribute__((packed)); + +struct d9_thitbreakpoint { + struct d9_header hdr; + struct d9_thitbreakpoint_msg msg; +} __attribute__((packed)); + +/* adding a thread */ +struct d9_taddthread_msg { + pid_t pid; + uint64_t tid; +} __attribute__((packed)); + +struct d9_taddthread { + struct d9_header hdr; + struct d9_taddthread_msg msg; +} __attribute__((packed)); + +#define D9_INIT_HDR(len, msgt) \ + { \ + .hdr = {.size = (len), .msg_type = (msgt) } \ + } + +/* 2LS ops. + * + * These represent the actual operations to be carried out for each message + * type sent to the 2LS. This serves to keep the actual operations separate from + * the implementation of the protocol itself. + */ +struct d9_ops { + int (*read_memory)(const struct d9_treadmem_msg *req, + struct d9_rreadmem_msg *resp); + int (*store_memory)(const struct d9_tstoremem_msg *req); + int (*fetch_registers)(struct uthread *t, struct d9_regs *resp); + int (*store_registers)(struct uthread *t, struct d9_regs *resp); + void (*resume)(struct uthread *t, bool singlestep); +}; + +/* gdbserver ops. 
+ * + * These represent the actual operations to be carried out for each message type + * sent to gdbserver. + */ +struct d9c_ops { + /* hit_breakpoint is called when the process hits a breakpoint. */ + int (*hit_breakpoint)(pid_t pid, uint64_t tid, uint64_t address); + + /* add_thread is called when the process adds a new thread. */ + int (*add_thread)(pid_t pid, uint64_t tid); +}; + +/* 2LS-side functions. */ +void d9s_init(struct d9_ops *debug_ops); + +/* Implementations of d9_ops. */ +int d9s_read_memory(const struct d9_treadmem_msg *req, + struct d9_rreadmem_msg *resp); +int d9s_store_memory(const struct d9_tstoremem_msg *req); +void d9s_resume(struct uthread *t, bool singlestep); + +/* Helpers to send messages from 2LS to gdbserver. */ +int d9s_notify_hit_breakpoint(uint64_t tid, uint64_t address); +int d9s_notify_add_thread(uint64_t tid); + +/* gdbserver-side functions. */ +int d9c_attach(unsigned long pid); +void *d9c_read_thread(void *arg); + +/* Helpers to send messages from gdbserver to 2LS. */ +int d9c_read_memory(int fd, uintptr_t address, uint32_t length, uint8_t *buf); +int d9c_store_memory(int fd, uintptr_t address, const void *const data, + uint32_t length); +int d9c_fetch_registers(int fd, uint64_t tid, struct d9_regs *regs); +int d9c_store_registers(int fd, uint64_t tid, struct d9_regs *regs); +int d9c_resume(int fd, uint64_t tid, bool singlestep); +int d9c_init(int fd, struct d9c_ops *ops); diff --git a/user/parlib/include/parlib/uthread.h b/user/parlib/include/parlib/uthread.h index f0b2b07732..72fd5eea62 100644 --- a/user/parlib/include/parlib/uthread.h +++ b/user/parlib/include/parlib/uthread.h @@ -3,6 +3,7 @@ #include #include #include +#include __BEGIN_DECLS @@ -24,6 +25,8 @@ __BEGIN_DECLS * cast their threads to uthreads when talking with vcore code. Vcore/default * 2LS code won't touch udata or beyond. 
*/ struct uthread { + LIST_ENTRY(uthread) entry; + uint64_t id; struct user_context u_ctx; struct ancillary_state as; void *tls_desc; @@ -102,6 +105,11 @@ void uthread_sleep_forever(void); void uthread_has_blocked(struct uthread *uthread, int flags); void uthread_paused(struct uthread *uthread); +/* Look up and return uthreads. */ +struct uthread *uthread_get_thread_by_id(uint64_t id); +void uthread_put_thread(struct uthread *uth); +void uthread_apply_all(void (*fn)(struct uthread *)); + /* Utility functions */ bool __check_preempt_pending(uint32_t vcoreid); /* careful: check the code */ void uth_disable_notifs(void); diff --git a/user/parlib/include/parlib/x86/debug.h b/user/parlib/include/parlib/x86/debug.h new file mode 100644 index 0000000000..4640077934 --- /dev/null +++ b/user/parlib/include/parlib/x86/debug.h @@ -0,0 +1,35 @@ +#pragma once + +#include + +struct d9_regs { + uint64_t reg_rax; + uint64_t reg_rbx; + uint64_t reg_rcx; + uint64_t reg_rdx; + uint64_t reg_rsp; + uint64_t reg_rbp; + uint64_t reg_rsi; + uint64_t reg_rdi; + uint64_t reg_rip; + uint64_t reg_r8; + uint64_t reg_r9; + uint64_t reg_r10; + uint64_t reg_r11; + uint64_t reg_r12; + uint64_t reg_r13; + uint64_t reg_r14; + uint64_t reg_r15; + uint64_t reg_eflags; + uint64_t reg_cs; + uint64_t reg_ss; + uint64_t reg_ds; + uint64_t reg_es; + uint64_t reg_fs; + uint64_t reg_gs; +}; + +void uthread_disable_single_step(struct uthread *t); +void uthread_enable_single_step(struct uthread *t); +int d9_fetch_registers(struct uthread *t, struct d9_regs *resp); +int d9_store_registers(struct uthread *t, struct d9_regs *resp); diff --git a/user/parlib/include/parlib/x86/trap.h b/user/parlib/include/parlib/x86/trap.h index 541fd04103..0b4e53e998 100644 --- a/user/parlib/include/parlib/x86/trap.h +++ b/user/parlib/include/parlib/x86/trap.h @@ -8,10 +8,13 @@ #include #include +#include __BEGIN_DECLS #define HW_TRAP_DIV_ZERO 0 +#define HW_TRAP_DEBUG 1 +#define HW_TRAP_BRKPT 3 #define HW_TRAP_GP_FAULT 13 #define 
HW_TRAP_PAGE_FAULT 14 @@ -59,4 +62,18 @@ static unsigned long __arch_refl_get_aux(struct user_context *ctx) ctx->tf.hw_tf.tf_padding4; } +static uintptr_t get_user_ctx_pc(struct user_context *ctx) +{ + switch (ctx->type) { + case ROS_HW_CTX: + return ctx->tf.hw_tf.tf_rip; + case ROS_SW_CTX: + return ctx->tf.sw_tf.tf_rip; + case ROS_VM_CTX: + return ctx->tf.vm_tf.tf_rip; + default: + panic("Bad context type %d for ctx %p\n", ctx->type, ctx); + } +} + __END_DECLS diff --git a/user/parlib/thread0_sched.c b/user/parlib/thread0_sched.c index 45a47a5d1f..25aa78b371 100644 --- a/user/parlib/thread0_sched.c +++ b/user/parlib/thread0_sched.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015 Google, Inc. +/* Copyright (c) 2015-2016 Google, Inc. * Barret Rhoden * See LICENSE for details. * @@ -7,13 +7,14 @@ * * This is closely coupled with uthread.c */ -#include #include +#include +#include +#include #include -#include #include -#include -#include +#include +#include #include static void thread0_sched_entry(void); @@ -29,16 +30,24 @@ static void thread0_mtx_unlock(uth_mutex_t m); /* externed into uthread.c */ struct schedule_ops thread0_2ls_ops = { - .sched_entry = thread0_sched_entry, - .thread_blockon_sysc = thread0_thread_blockon_sysc, - .thread_refl_fault = thread0_thread_refl_fault, - .thread_runnable = thread0_thread_runnable, - .thread_paused = thread0_thread_runnable, - .thread_has_blocked = thread0_thread_has_blocked, - .mutex_alloc = thread0_mtx_alloc, - .mutex_free = thread0_mtx_free, - .mutex_lock = thread0_mtx_lock, - .mutex_unlock = thread0_mtx_unlock, + .sched_entry = thread0_sched_entry, + .thread_blockon_sysc = thread0_thread_blockon_sysc, + .thread_refl_fault = thread0_thread_refl_fault, + .thread_runnable = thread0_thread_runnable, + .thread_paused = thread0_thread_runnable, + .thread_has_blocked = thread0_thread_has_blocked, + .mutex_alloc = thread0_mtx_alloc, + .mutex_free = thread0_mtx_free, + .mutex_lock = thread0_mtx_lock, + .mutex_unlock = thread0_mtx_unlock, 
+}; + +static struct d9_ops thread0_d9ops = { + .read_memory = &d9s_read_memory, + .store_memory = &d9s_store_memory, + .fetch_registers = &d9_fetch_registers, + .store_registers = &d9_store_registers, + .resume = &d9s_resume, }; /* externed into uthread.c */ @@ -48,7 +57,7 @@ struct uthread *thread0_uth; * don't actually attach this mgmt info to it. But since we just have one * thread, it doesn't matter. */ struct thread0_info { - bool is_blocked; + bool is_blocked; }; static struct thread0_info thread0_info; static struct event_queue *sysc_evq; @@ -66,6 +75,9 @@ void thread0_lib_init(void) sysc_evq = get_eventq(EV_MBOX_BITMAP); sysc_evq->ev_flags = EVENT_INDIR | EVENT_WAKEUP; register_ev_handler(EV_SYSCALL, thread0_handle_syscall, 0); + + /* Make ourselves available for debugging */ + d9s_init(&thread0_d9ops); } /* Thread0 scheduler ops (for processes that haven't linked in a full 2LS) */ @@ -90,7 +102,7 @@ static void thread0_sched_entry(void) static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg) { - struct syscall *sysc = (struct syscall*)arg; + struct syscall *sysc = (struct syscall *)arg; thread0_thread_has_blocked(uthread, 0); if (!register_evq(sysc, sysc_evq)) thread0_thread_runnable(uthread); @@ -99,8 +111,8 @@ static void thread0_thread_blockon_sysc(struct uthread *uthread, void *arg) static void refl_error(struct uthread *uth, unsigned int trap_nr, unsigned int err, unsigned long aux) { - printf("Thread has unhandled fault: %d, err: %d, aux: %p\n", - trap_nr, err, aux); + printf("Thread has unhandled fault: %d, err: %d, aux: %p\n", trap_nr, err, + aux); /* Note that uthread.c already copied out our ctx into the uth * struct */ print_user_context(&uth->u_ctx); @@ -131,6 +143,12 @@ static void thread0_thread_refl_fault(struct uthread *uth, if (!handle_page_fault(uth, err, aux)) refl_error(uth, trap_nr, err, aux); break; + case HW_TRAP_BRKPT: + case HW_TRAP_DEBUG: + /* We only have one thread, no need to stop other threads. 
*/ + uthread_has_blocked(uth, 0); + d9s_notify_hit_breakpoint(uth->id, 0); + break; default: refl_error(uth, trap_nr, err, aux); } diff --git a/user/parlib/uthread.c b/user/parlib/uthread.c index 520c94b4d2..bd164d51eb 100644 --- a/user/parlib/uthread.c +++ b/user/parlib/uthread.c @@ -1,16 +1,17 @@ /* Copyright (c) 2011-2014 The Regents of the University of California + * Copyright (c) 2016 Google Inc. * Barret Rhoden * See LICENSE for details. */ - -#include #include +#include +#include +#include +#include +#include #include -#include #include -#include +#include #include -#include -#include /* SCPs have a default 2LS that only manages thread 0. Any other 2LS, such as * pthreads, should override sched_ops in its init code. */ @@ -22,13 +23,20 @@ __thread struct uthread *current_uthread = 0; * extensively about the details. Will call out when necessary. */ static struct event_queue *preempt_ev_q; +/* Thread list and associated lock. */ +LIST_HEAD(uthread_list, uthread); +static struct uthread_list all_uthreads = LIST_HEAD_INITIALIZER(all_uthreads); +static struct spin_pdr_lock thread_list_lock = SPINPDR_INITIALIZER; + /* Helpers: */ #define UTH_TLSDESC_NOTLS (void*)(-1) +static uint64_t __get_next_tid(void); static inline bool __uthread_has_tls(struct uthread *uthread); static int __uthread_allocate_tls(struct uthread *uthread); static int __uthread_reinit_tls(struct uthread *uthread); static void __uthread_free_tls(struct uthread *uthread); static void __run_current_uthread_raw(void); +static void uthread_assign_id(struct uthread *uthread); static void handle_vc_preempt(struct event_msg *ev_msg, unsigned int ev_type, void *data); @@ -55,8 +63,18 @@ static void uthread_init_thread0(struct uthread *uthread) /* need to track thread0 for TLS deallocation */ uthread->flags |= UTHREAD_IS_THREAD0; uthread->notif_disabled_depth = 0; - /* setting the uthread's TLS var. this is idempotent for SCPs (us) */ + /* setting the uthread's TLS var. 
this is idempotent for SCPs (us) */ __vcoreid = 0; + uthread_assign_id(uthread); +} + +static void uthread_assign_id(struct uthread *uthread) +{ + /* Assign a thread ID and add to thread list. */ + spin_pdr_lock(&thread_list_lock); + uthread->id = __get_next_tid(); + LIST_INSERT_HEAD(&all_uthreads, uthread, entry); + spin_pdr_unlock(&thread_list_lock); } /* Helper, makes VC ctx tracks uthread as its current_uthread in its TLS. @@ -273,6 +291,16 @@ void __attribute__((noreturn)) uthread_vcore_entry(void) assert(0); /* 2LS sched_entry should never return */ } +/* __get_next_tid returns an unused thread id. + * + * Warning: this will reuse numbers eventually. */ +static uint64_t __get_next_tid(void) +{ + static uint64_t next_tid = 1; + + return next_tid++; +} + /* Does the uthread initialization of a uthread that the caller created. Call * this whenever you are "starting over" with a thread. */ void uthread_init(struct uthread *new_thread, struct uth_thread_attr *attr) @@ -291,6 +319,11 @@ void uthread_init(struct uthread *new_thread, struct uth_thread_attr *attr) * were interrupted off a core. */ new_thread->flags |= UTHREAD_SAVED; new_thread->notif_disabled_depth = 0; + + uthread_assign_id(new_thread); + + d9s_notify_add_thread(new_thread->id); + if (attr && attr->want_tls) { /* Get a TLS. If we already have one, reallocate/refresh it */ if (new_thread->tls_desc) @@ -489,6 +522,11 @@ void uthread_sleep_forever(void) void uthread_cleanup(struct uthread *uthread) { printd("[U] thread %08p on vcore %d is DYING!\n", uthread, vcore_id()); + + spin_pdr_lock(&thread_list_lock); + LIST_REMOVE(uthread, entry); + spin_pdr_unlock(&thread_list_lock); + /* we alloc and manage the TLS, so lets get rid of it, except for thread0. * glibc owns it. 
might need to keep it around for a full exit() */ if (__uthread_has_tls(uthread) && !(uthread->flags & UTHREAD_IS_THREAD0)) @@ -1149,3 +1187,37 @@ static void __uthread_free_tls(struct uthread *uthread) free_tls(uthread->tls_desc); uthread->tls_desc = NULL; } + +void uthread_apply_all(void (*fn)(struct uthread *)) +{ + struct uthread *t = NULL; + + spin_pdr_lock(&thread_list_lock); + LIST_FOREACH(t, &all_uthreads, entry) + fn(t); + + spin_pdr_unlock(&thread_list_lock); +} + +/* TODO(chrisko): hash table instead of list. */ +struct uthread *uthread_get_thread_by_id(uint64_t id) +{ + struct uthread *t = NULL, *ret = NULL; + + spin_pdr_lock(&thread_list_lock); + LIST_FOREACH(t, &all_uthreads, entry) { + if (t->id == id) { + ret = t; + break; + } + } + + /* TODO: increase ref count on thread when we have ref counting. */ + spin_pdr_unlock(&thread_list_lock); + return ret; +} + +void uthread_put_thread(struct uthread *uth) +{ + /* TODO: drop reference to thread. */ +} diff --git a/user/parlib/vcore.c b/user/parlib/vcore.c index 1783d9810a..c46dc43030 100644 --- a/user/parlib/vcore.c +++ b/user/parlib/vcore.c @@ -156,6 +156,8 @@ static int prep_remaining_vcores(void) assert((void*)mmap_block != MAP_FAILED); for (int i = 1; i < max_vcores(); i++) __prep_vcore(i, mmap_block + 4 * (i - 1) * PGSIZE); + + return 0; } /* Run libc specific early setup code. 
*/ diff --git a/user/parlib/x86/debug.c b/user/parlib/x86/debug.c new file mode 100644 index 0000000000..9fe751b17b --- /dev/null +++ b/user/parlib/x86/debug.c @@ -0,0 +1,125 @@ +#include +#include +#include + +void uthread_enable_single_step(struct uthread *t) +{ + switch (t->u_ctx.type) { + case ROS_HW_CTX: + t->u_ctx.tf.hw_tf.tf_rflags |= FL_TF; + break; + default: + panic("bad context type\n"); + } +} + +void uthread_disable_single_step(struct uthread *t) +{ + switch (t->u_ctx.type) { + case ROS_HW_CTX: + t->u_ctx.tf.hw_tf.tf_rflags &= ~FL_TF; + break; + default: + panic("bad context type\n"); + } +} + +/* TODO(chrisko): add a way to signal that a register isn't supplied for sw + * contexts; because gdbserver has a notion of not knowing a register's value. + */ + +int d9_fetch_registers(struct uthread *t, struct d9_regs *regs) +{ +#define reg_from_hwtf(fld) \ + regs->reg_##fld = t->u_ctx.tf.hw_tf.tf_##fld + +#define reg_from_swtf(fld) \ + regs->reg_##fld = t->u_ctx.tf.sw_tf.tf_##fld + + switch (t->u_ctx.type) { + case ROS_HW_CTX: + reg_from_hwtf(rax); + reg_from_hwtf(rbx); + reg_from_hwtf(rcx); + reg_from_hwtf(rdx); + reg_from_hwtf(rsi); + reg_from_hwtf(rdi); + reg_from_hwtf(rbp); + reg_from_hwtf(rsp); + reg_from_hwtf(r8); + reg_from_hwtf(r9); + reg_from_hwtf(r10); + reg_from_hwtf(r11); + reg_from_hwtf(r12); + reg_from_hwtf(r13); + reg_from_hwtf(r14); + reg_from_hwtf(r15); + reg_from_hwtf(rip); + reg_from_hwtf(cs); + reg_from_hwtf(ss); + regs->reg_eflags = (uint32_t) t->u_ctx.tf.hw_tf.tf_rflags; + break; + case ROS_SW_CTX: + reg_from_swtf(rbx); + reg_from_swtf(rbp); + reg_from_swtf(rsp); + reg_from_swtf(rip); + reg_from_swtf(r12); + reg_from_swtf(r13); + reg_from_swtf(r14); + reg_from_swtf(r15); + break; + case ROS_VM_CTX: + panic("2LS debug: VM context unsupported\n"); + } + + return 0; +} + +int d9_store_registers(struct uthread *t, struct d9_regs *regs) +{ +#define reg_to_hwtf(fld) \ + t->u_ctx.tf.hw_tf.tf_##fld = regs->reg_##fld + +#define reg_to_swtf(fld)
\ + t->u_ctx.tf.sw_tf.tf_##fld = regs->reg_##fld + + switch (t->u_ctx.type) { + case ROS_HW_CTX: + reg_to_hwtf(rax); + reg_to_hwtf(rbx); + reg_to_hwtf(rcx); + reg_to_hwtf(rdx); + reg_to_hwtf(rsi); + reg_to_hwtf(rdi); + reg_to_hwtf(rbp); + reg_to_hwtf(rsp); + reg_to_hwtf(r8); + reg_to_hwtf(r9); + reg_to_hwtf(r10); + reg_to_hwtf(r11); + reg_to_hwtf(r12); + reg_to_hwtf(r13); + reg_to_hwtf(r14); + reg_to_hwtf(r15); + reg_to_hwtf(rip); + reg_to_hwtf(cs); + reg_to_hwtf(ss); + t->u_ctx.tf.hw_tf.tf_rflags = regs->reg_eflags; + break; + case ROS_SW_CTX: + reg_to_swtf(rbx); + reg_to_swtf(rip); + reg_to_swtf(rsp); + reg_to_swtf(rbp); + reg_to_swtf(r12); + reg_to_swtf(r13); + reg_to_swtf(r14); + reg_to_swtf(r15); + break; + case ROS_VM_CTX: + panic("2LS debug: VM context unsupported\n"); + } + + return 0; +} diff --git a/user/pthread/pthread.c b/user/pthread/pthread.c index 9a6691ec9b..39daa5c925 100644 --- a/user/pthread/pthread.c +++ b/user/pthread/pthread.c @@ -31,7 +31,6 @@ bool need_tls = TRUE; struct sysc_mgmt *sysc_mgmt = 0; /* Helper / local functions */ -static int get_next_pid(void); static inline void spin_to_sleep(unsigned int spins, unsigned int *spun); static inline void pthread_exit_no_cleanup(void *ret); @@ -380,13 +379,6 @@ static int __pthread_allocate_stack(struct pthread_tcb *pt) return 0; } -// Warning, this will reuse numbers eventually -static int get_next_pid(void) -{ - static uint32_t next_pid = 0; - return next_pid++; -} - int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize) { attr->stacksize = stacksize; @@ -448,14 +440,12 @@ void __attribute__((constructor)) pthread_lib_init(void) sizeof(struct pthread_tcb)); assert(!ret); memset(t, 0, sizeof(struct pthread_tcb)); /* aggressively 0 for bugs */ - t->id = get_next_pid(); t->stacksize = USTACK_NUM_PAGES * PGSIZE; t->stacktop = (void*)USTACKTOP; t->detached = TRUE; t->state = PTH_RUNNING; t->joiner = 0; /* implies that sigmasks are longs, which they are.
*/ - assert(t->id == 0); t->sched_policy = SCHED_FIFO; t->sched_priority = 0; SLIST_INIT(&t->cr_stack); @@ -554,7 +544,6 @@ int __pthread_create(pthread_t *thread, const pthread_attr_t *attr, memset(pthread, 0, sizeof(struct pthread_tcb)); /* aggressively 0 for bugs*/ pthread->stacksize = PTHREAD_STACK_SIZE; /* default */ pthread->state = PTH_CREATED; - pthread->id = get_next_pid(); pthread->detached = FALSE; /* default */ pthread->joiner = 0; /* Might override these later, based on attr && EXPLICIT_SCHED */ @@ -1086,12 +1075,12 @@ int pthread_barrier_wait(pthread_barrier_t *b) struct pthread_list restartees = SLIST_HEAD_INITIALIZER(restartees); struct pthread_tcb *pthread_i; struct barrier_junk local_junk; - + long old_count = atomic_fetch_and_add(&b->count, -1); if (old_count == 1) { printd("Thread %d is last to hit the barrier, resetting...\n", - pthread_self()->id); + pthread_self()->uthread.id); /* TODO: we might want to grab the lock right away, so a few short * circuit faster? */ atomic_set(&b->count, b->total_threads); diff --git a/user/pthread/pthread.h b/user/pthread/pthread.h index ca5a3cc65b..b45762a996 100644 --- a/user/pthread/pthread.h +++ b/user/pthread/pthread.h @@ -46,7 +46,6 @@ struct pthread_tcb { int state; bool detached; struct pthread_tcb *joiner; /* raced on by exit and join */ - uint32_t id; uint32_t stacksize; void *stacktop; void *(*start_routine)(void*);