[tor-commits] [tor/maint-0.2.7] Make sure channel_t queues its own copy of incoming cells

andrea at torproject.org
Tue Mar 29 15:12:44 UTC 2016


commit bd87d37a861c541afbeb660b4d8dd62df14d5b45
Author: Andrea Shepard <andrea at torproject.org>
Date:   Tue Mar 15 07:40:19 2016 +0000

    Make sure channel_t queues its own copy of incoming cells
---
 src/or/channel.c       | 38 ++++++++++++++++++++++++++++++++++----
 src/or/channeltls.c    | 11 +++++++++++
 src/or/connection_or.c | 35 +++++++++++++++++++++++++++++++++++
 src/or/connection_or.h |  1 +
 4 files changed, 81 insertions(+), 4 deletions(-)
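
(A minimal, self-contained sketch of the copy-on-queue rule this patch adopts.
The types and names below are illustrative stand-ins, not Tor's actual
cell_t/channel_t API; the real code uses tor_malloc_zero() and the
TOR_SIMPLEQ queue macros rather than the toy list shown here.)

/* Sketch: copy-on-queue for incoming fixed-size cells (stand-in types). */
#include <stdlib.h>
#include <string.h>

#define CELL_PAYLOAD_SIZE 509

typedef struct cell_s {
  unsigned int circ_id;
  unsigned char command;
  unsigned char payload[CELL_PAYLOAD_SIZE];
} cell_t;

typedef void (*cell_handler_fn)(cell_t *cell);

typedef struct queue_entry_s {
  struct queue_entry_s *next;
  cell_t *cell;                        /* heap copy owned by the queue */
} queue_entry_t;

static queue_entry_t *incoming_queue = NULL;
static cell_handler_fn cell_handler = NULL;  /* installed by the upper layer */

/* Analogue of channel_queue_cell(): if a handler is installed and nothing is
 * already queued, deliver directly (fast path, no copy); otherwise queue a
 * heap copy so the caller's buffer may live on the stack or be freed as soon
 * as this returns.  Error handling omitted; tor_malloc_zero() aborts on OOM. */
static void
queue_incoming_cell(cell_t *cell)
{
  if (cell_handler && !incoming_queue) {
    cell_handler(cell);                /* caller keeps ownership of its buffer */
  } else {
    cell_t *copy = calloc(1, sizeof(cell_t));
    queue_entry_t *q = calloc(1, sizeof(queue_entry_t));
    memcpy(copy, cell, sizeof(cell_t));
    q->cell = copy;                    /* the queue owns this copy from now on */
    q->next = incoming_queue;          /* (the real code appends, not prepends) */
    incoming_queue = q;
  }
}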

diff --git a/src/or/channel.c b/src/or/channel.c
index 21522a5..62a21be 100644
--- a/src/or/channel.c
+++ b/src/or/channel.c
@@ -2652,6 +2652,11 @@ channel_process_cells(channel_t *chan)
   /*
    * Process cells until we're done or find one we have no current handler
    * for.
+   *
+   * We must free the cells here after calling the handler, since custody
+   * of the buffer was given to the channel layer when they were queued;
+   * see comments on memory management in channel_queue_cell() and in
+   * channel_queue_var_cell() below.
    */
   while (NULL != (q = TOR_SIMPLEQ_FIRST(&chan->incoming_queue))) {
     tor_assert(q);
@@ -2669,6 +2674,7 @@ channel_process_cells(channel_t *chan)
                 q->u.fixed.cell, chan,
                 U64_PRINTF_ARG(chan->global_identifier));
       chan->cell_handler(chan, q->u.fixed.cell);
+      tor_free(q->u.fixed.cell);
       tor_free(q);
     } else if (q->type == CELL_QUEUE_VAR &&
                chan->var_cell_handler) {
@@ -2681,6 +2687,7 @@ channel_process_cells(channel_t *chan)
                 q->u.var.var_cell, chan,
                 U64_PRINTF_ARG(chan->global_identifier));
       chan->var_cell_handler(chan, q->u.var.var_cell);
+      tor_free(q->u.var.var_cell);
       tor_free(q);
     } else {
       /* Can't handle this one */
@@ -2701,6 +2708,7 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
 {
   int need_to_queue = 0;
   cell_queue_entry_t *q;
+  cell_t *cell_copy = NULL;
 
   tor_assert(chan);
   tor_assert(cell);
@@ -2728,8 +2736,19 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
               U64_PRINTF_ARG(chan->global_identifier));
     chan->cell_handler(chan, cell);
   } else {
-    /* Otherwise queue it and then process the queue if possible. */
-    q = cell_queue_entry_new_fixed(cell);
+    /*
+     * Otherwise queue it and then process the queue if possible.
+     *
+     * We queue a copy, not the original pointer - it might have been on the
+     * stack in connection_or_process_cells_from_inbuf() (or another caller
+     * if we ever have a subclass other than channel_tls_t), or be freed
+     * there after we return.  This is the uncommon case; the non-copying
+     * fast path occurs in the if (!need_to_queue) case above when the
+     * upper layer has installed cell handlers.
+     */
+    cell_copy = tor_malloc_zero(sizeof(cell_t));
+    memcpy(cell_copy, cell, sizeof(cell_t));
+    q = cell_queue_entry_new_fixed(cell_copy);
     log_debug(LD_CHANNEL,
               "Queueing incoming cell_t %p for channel %p "
               "(global ID " U64_FORMAT ")",
@@ -2755,6 +2774,7 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
 {
   int need_to_queue = 0;
   cell_queue_entry_t *q;
+  var_cell_t *cell_copy = NULL;
 
   tor_assert(chan);
   tor_assert(var_cell);
@@ -2783,8 +2803,18 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
               U64_PRINTF_ARG(chan->global_identifier));
     chan->var_cell_handler(chan, var_cell);
   } else {
-    /* Otherwise queue it and then process the queue if possible. */
-    q = cell_queue_entry_new_var(var_cell);
+    /*
+     * Otherwise queue it and then process the queue if possible.
+     *
+     * We queue a copy, not the original pointer - it might have been on the
+     * stack in connection_or_process_cells_from_inbuf() (or another caller
+     * if we ever have a subclass other than channel_tls_t), or be freed
+     * there after we return.  This is the uncommon case; the non-copying
+     * fast path occurs in the if (!need_to_queue) case above when the
+     * upper layer has installed cell handlers.
+     */
+    cell_copy = var_cell_copy(var_cell);
+    q = cell_queue_entry_new_var(cell_copy);
     log_debug(LD_CHANNEL,
               "Queueing incoming var_cell_t %p for channel %p "
               "(global ID " U64_FORMAT ")",
diff --git a/src/or/channeltls.c b/src/or/channeltls.c
index c90f569..2a84514 100644
--- a/src/or/channeltls.c
+++ b/src/or/channeltls.c
@@ -1009,6 +1009,11 @@ channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, int *time,
  * for cell types specific to the handshake for this transport protocol and
  * handles them, and queues all other cells to the channel_t layer, which
  * eventually will hand them off to command.c.
+ *
+ * The channel layer itself decides whether the cell should be queued or
+ * can be handed off immediately to the upper-layer code.  It is responsible
+ * for copying in the case that it queues; we merely pass pointers through
+ * which we get from connection_or_process_cells_from_inbuf().
  */
 
 void
@@ -1106,6 +1111,12 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
  * related and live below the channel_t layer, so no variable-length
  * cells ever get delivered in the current implementation, but I've left
  * the mechanism in place for future use.
+ *
+ * If we were handing them off to the upper layer, the channel_t queueing
+ * code would be responsible for memory management, and we'd just be passing
+ * pointers through from connection_or_process_cells_from_inbuf().  That
+ * caller always frees them after this function returns, so this function
+ * should never free var_cell.
  */
 
 void
diff --git a/src/or/connection_or.c b/src/or/connection_or.c
index a967c93..9944494 100644
--- a/src/or/connection_or.c
+++ b/src/or/connection_or.c
@@ -488,6 +488,28 @@ var_cell_new(uint16_t payload_len)
   return cell;
 }
 
+/**
+ * Copy a var_cell_t
+ */
+
+var_cell_t *
+var_cell_copy(const var_cell_t *src)
+{
+  var_cell_t *copy = NULL;
+  size_t size = 0;
+
+  if (src != NULL) {
+    size = STRUCT_OFFSET(var_cell_t, payload) + src->payload_len;
+    copy = tor_malloc_zero(size);
+    copy->payload_len = src->payload_len;
+    copy->command = src->command;
+    copy->circ_id = src->circ_id;
+    memcpy(copy->payload, src->payload, copy->payload_len);
+  }
+
+  return copy;
+}
+
 /** Release all space held by <b>cell</b>. */
 void
 var_cell_free(var_cell_t *cell)
@@ -2060,6 +2082,19 @@ connection_or_process_cells_from_inbuf(or_connection_t *conn)
 {
   var_cell_t *var_cell;
 
+  /*
+   * Note on memory management for incoming cells: below the channel layer,
+   * we shouldn't need to consider its internal queueing/copying logic.  It
+   * is safe to pass cells to it on the stack or on the heap, but in the
+   * latter case we must be sure we free them later.
+   *
+   * The incoming cell queue code in channel.c will (in the common case)
+   * decide it can pass them to the upper layer immediately, in which case
+   * those functions may run directly on the cell pointers we pass here, or
+   * it may decide to queue them, in which case it will allocate its own
+   * buffer and copy the cell.
+   */
+
   while (1) {
     log_debug(LD_OR,
               TOR_SOCKET_T_FORMAT": starting, inbuf_datalen %d "
diff --git a/src/or/connection_or.h b/src/or/connection_or.h
index 3877fd5..0bd8567 100644
--- a/src/or/connection_or.h
+++ b/src/or/connection_or.h
@@ -97,6 +97,7 @@ void cell_pack(packed_cell_t *dest, const cell_t *src, int wide_circ_ids);
 int var_cell_pack_header(const var_cell_t *cell, char *hdr_out,
                          int wide_circ_ids);
 var_cell_t *var_cell_new(uint16_t payload_len);
+var_cell_t *var_cell_copy(const var_cell_t *src);
 void var_cell_free(var_cell_t *cell);
 
 /** DOCDOC */
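
(Caller-side sketch of why the copy matters for
connection_or_process_cells_from_inbuf(): the read loop can keep unpacking
into a single stack buffer because the channel layer copies anything it
needs to keep past the call.  Reuses queue_incoming_cell() from the first
sketch; fetch_next_cell() is purely hypothetical and stands in for the
inbuf parsing.)

/* Sketch: the connection layer's read loop (stand-in names). */
static int fetch_next_cell(cell_t *out);  /* hypothetical: unpack one cell from inbuf */

static void
process_cells_from_inbuf_sketch(void)
{
  cell_t cell;                     /* one stack buffer, reused on every iteration */
  while (fetch_next_cell(&cell)) {
    queue_incoming_cell(&cell);    /* safe: the channel layer copies if it queues */
    /* cell may be overwritten right away; no queued entry ever points at it */
  }
}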