mirror of https://gitlab.torproject.org/tpo/core/tor.git
Merge branch 'bug18570_027' into maint-0.2.7
commit 0b45cab147

changes/bug18570 (new file)
@@ -0,0 +1,7 @@
+  o Minor bugfixes (correctness):
+    - Fix a bad memory handling bug that would occur if we had queued
+      a cell on a channel's incoming queue. Fortunately, we can't
+      actually queue a cell like that as our code is constructed today,
+      but it's best to avoid this kind of error even before any code
+      triggers it. Fixes bug 18570; bugfix on 0.2.4.4-alpha.
+
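The class of bug being fixed is an incoming queue holding a pointer it does not own: if the caller's cell lives on the stack, or is freed after the call returns, the queue ends up with a dangling pointer and later frees memory it never allocated. A minimal, self-contained sketch of the two patterns (hypothetical toy types, not Tor's real cell_t or queue structures):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical toy types for illustration; Tor's real structures differ. */
typedef struct { unsigned char payload[16]; } toy_cell_t;
typedef struct toy_entry { toy_cell_t *cell; struct toy_entry *next; } toy_entry_t;

static toy_entry_t *queue_head = NULL;

/* Unsafe: stores the caller's pointer. If the caller's cell lives on the
 * stack, or is freed after this returns, the queue is left holding a
 * dangling pointer and will later free memory it never owned. */
static void queue_cell_unsafe(toy_cell_t *cell) {
  toy_entry_t *e = malloc(sizeof(*e));
  e->cell = cell;
  e->next = queue_head;
  queue_head = e;
}

/* Safe: copy into heap memory that the queue owns. */
static void queue_cell_copy(const toy_cell_t *cell) {
  toy_entry_t *e = malloc(sizeof(*e));
  e->cell = malloc(sizeof(*cell));
  memcpy(e->cell, cell, sizeof(*cell));
  e->next = queue_head;
  queue_head = e;
}

int main(void) {
  toy_cell_t on_stack = { { 0 } };
  (void) queue_cell_unsafe;      /* shown only as the anti-pattern */
  queue_cell_copy(&on_stack);    /* the queue now owns an independent copy */
  free(queue_head->cell);
  free(queue_head);
  printf("queued and freed a copied cell\n");
  return 0;
}

The commit applies the second pattern: channel_queue_cell() now copies into a tor_malloc_zero()'d buffer, and channel_queue_var_cell() copies via the new var_cell_copy() helper added below.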
src/or/channel.c

@@ -2652,6 +2652,11 @@ channel_process_cells(channel_t *chan)
  /*
   * Process cells until we're done or find one we have no current handler
   * for.
+  *
+  * We must free the cells here after calling the handler, since custody
+  * of the buffer was given to the channel layer when they were queued;
+  * see comments on memory management in channel_queue_cell() and in
+  * channel_queue_var_cell() below.
   */
  while (NULL != (q = TOR_SIMPLEQ_FIRST(&chan->incoming_queue))) {
    tor_assert(q);
@@ -2669,6 +2674,7 @@ channel_process_cells(channel_t *chan)
                q->u.fixed.cell, chan,
                U64_PRINTF_ARG(chan->global_identifier));
      chan->cell_handler(chan, q->u.fixed.cell);
+     tor_free(q->u.fixed.cell);
      tor_free(q);
    } else if (q->type == CELL_QUEUE_VAR &&
               chan->var_cell_handler) {
@@ -2681,6 +2687,7 @@ channel_process_cells(channel_t *chan)
                q->u.var.var_cell, chan,
                U64_PRINTF_ARG(chan->global_identifier));
      chan->var_cell_handler(chan, q->u.var.var_cell);
+     tor_free(q->u.var.var_cell);
      tor_free(q);
    } else {
      /* Can't handle this one */
@@ -2701,6 +2708,7 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
{
  int need_to_queue = 0;
  cell_queue_entry_t *q;
+ cell_t *cell_copy = NULL;

  tor_assert(chan);
  tor_assert(cell);
@@ -2728,8 +2736,19 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
              U64_PRINTF_ARG(chan->global_identifier));
    chan->cell_handler(chan, cell);
  } else {
-   /* Otherwise queue it and then process the queue if possible. */
-   q = cell_queue_entry_new_fixed(cell);
+   /*
+    * Otherwise queue it and then process the queue if possible.
+    *
+    * We queue a copy, not the original pointer - it might have been on the
+    * stack in connection_or_process_cells_from_inbuf() (or another caller
+    * if we ever have a subclass other than channel_tls_t), or be freed
+    * there after we return. This is the uncommon case; the non-copying
+    * fast path occurs in the if (!need_to_queue) case above when the
+    * upper layer has installed cell handlers.
+    */
+   cell_copy = tor_malloc_zero(sizeof(cell_t));
+   memcpy(cell_copy, cell, sizeof(cell_t));
+   q = cell_queue_entry_new_fixed(cell_copy);
    log_debug(LD_CHANNEL,
              "Queueing incoming cell_t %p for channel %p "
              "(global ID " U64_FORMAT ")",
@@ -2755,6 +2774,7 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
{
  int need_to_queue = 0;
  cell_queue_entry_t *q;
+ var_cell_t *cell_copy = NULL;

  tor_assert(chan);
  tor_assert(var_cell);
@@ -2783,8 +2803,18 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
              U64_PRINTF_ARG(chan->global_identifier));
    chan->var_cell_handler(chan, var_cell);
  } else {
-   /* Otherwise queue it and then process the queue if possible. */
-   q = cell_queue_entry_new_var(var_cell);
+   /*
+    * Otherwise queue it and then process the queue if possible.
+    *
+    * We queue a copy, not the original pointer - it might have been on the
+    * stack in connection_or_process_cells_from_inbuf() (or another caller
+    * if we ever have a subclass other than channel_tls_t), or be freed
+    * there after we return. This is the uncommon case; the non-copying
+    * fast path occurs in the if (!need_to_queue) case above when the
+    * upper layer has installed cell handlers.
+    */
+   cell_copy = var_cell_copy(var_cell);
+   q = cell_queue_entry_new_var(cell_copy);
    log_debug(LD_CHANNEL,
              "Queueing incoming var_cell_t %p for channel %p "
              "(global ID " U64_FORMAT ")",
src/or/channeltls.c

@@ -1009,6 +1009,11 @@ channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, int *time,
 * for cell types specific to the handshake for this transport protocol and
 * handles them, and queues all other cells to the channel_t layer, which
 * eventually will hand them off to command.c.
+ *
+ * The channel layer itself decides whether the cell should be queued or
+ * can be handed off immediately to the upper-layer code. It is responsible
+ * for copying in the case that it queues; we merely pass pointers through
+ * which we get from connection_or_process_cells_from_inbuf().
 */

void
@@ -1106,6 +1111,12 @@ channel_tls_handle_cell(cell_t *cell, or_connection_t *conn)
 * related and live below the channel_t layer, so no variable-length
 * cells ever get delivered in the current implementation, but I've left
 * the mechanism in place for future use.
+ *
+ * If we were handing them off to the upper layer, the channel_t queueing
+ * code would be responsible for memory management, and we'd just be passing
+ * pointers through from connection_or_process_cells_from_inbuf(). That
+ * caller always frees them after this function returns, so this function
+ * should never free var_cell.
 */

void
src/or/connection_or.c

@@ -488,6 +488,28 @@ var_cell_new(uint16_t payload_len)
  return cell;
}

+/**
+ * Copy a var_cell_t
+ */
+
+var_cell_t *
+var_cell_copy(const var_cell_t *src)
+{
+  var_cell_t *copy = NULL;
+  size_t size = 0;
+
+  if (src != NULL) {
+    size = STRUCT_OFFSET(var_cell_t, payload) + src->payload_len;
+    copy = tor_malloc_zero(size);
+    copy->payload_len = src->payload_len;
+    copy->command = src->command;
+    copy->circ_id = src->circ_id;
+    memcpy(copy->payload, src->payload, copy->payload_len);
+  }
+
+  return copy;
+}
+
/** Release all space held by <b>cell</b>. */
void
var_cell_free(var_cell_t *cell)
@@ -2060,6 +2082,19 @@ connection_or_process_cells_from_inbuf(or_connection_t *conn)
{
  var_cell_t *var_cell;

+ /*
+  * Note on memory management for incoming cells: below the channel layer,
+  * we shouldn't need to consider its internal queueing/copying logic. It
+  * is safe to pass cells to it on the stack or on the heap, but in the
+  * latter case we must be sure we free them later.
+  *
+  * The incoming cell queue code in channel.c will (in the common case)
+  * decide it can pass them to the upper layer immediately, in which case
+  * those functions may run directly on the cell pointers we pass here, or
+  * it may decide to queue them, in which case it will allocate its own
+  * buffer and copy the cell.
+  */
+
  while (1) {
    log_debug(LD_OR,
              TOR_SOCKET_T_FORMAT": starting, inbuf_datalen %d "
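To make the caller-side half of that contract concrete, here is a hedged sketch, not code from this commit; it assumes Tor's internal or.h/channel.h/connection_or.h declarations and an already-open, registered channel_t, and the helper name example_hand_off_cells is invented for illustration:

/* Hedged sketch only; not part of this commit. */
#include <string.h>
#include "or.h"
#include "channel.h"
#include "connection_or.h"

static void
example_hand_off_cells(channel_t *chan)
{
  cell_t stack_cell;
  var_cell_t *heap_cell;

  /* A stack-allocated cell is safe to pass down: if the channel layer
   * decides to queue it, it copies the contents into its own buffer. */
  memset(&stack_cell, 0, sizeof(stack_cell));
  channel_queue_cell(chan, &stack_cell);

  /* A heap-allocated cell is also fine, but ownership stays with us, so
   * we must free our allocation ourselves once the call returns. */
  heap_cell = var_cell_new(4);
  channel_queue_var_cell(chan, heap_cell);
  var_cell_free(heap_cell);
}

Either way, ownership of the cell we pass stays on our side of the call; the channel layer copies if and only if it needs to queue.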
src/or/connection_or.h

@@ -97,6 +97,7 @@ void cell_pack(packed_cell_t *dest, const cell_t *src, int wide_circ_ids);
int var_cell_pack_header(const var_cell_t *cell, char *hdr_out,
                         int wide_circ_ids);
var_cell_t *var_cell_new(uint16_t payload_len);
+var_cell_t *var_cell_copy(const var_cell_t *src);
void var_cell_free(var_cell_t *cell);

/** DOCDOC */
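With var_cell_copy() now exported alongside var_cell_new() and var_cell_free(), a caller that needs its own long-lived duplicate of a variable-length cell can make one directly. A brief usage sketch (again assuming Tor's internal headers; the function name and payload bytes below are made up for illustration):

#include <string.h>
#include "or.h"
#include "connection_or.h"

/* Hypothetical illustration, not code from this commit. */
static var_cell_t *
example_duplicate_cell(void)
{
  var_cell_t *original = var_cell_new(4);     /* 4-byte payload */
  var_cell_t *copy;

  original->command = CELL_VERSIONS;
  original->circ_id = 0;
  memcpy(original->payload, "\x00\x03\x00\x04", 4);

  /* var_cell_copy() allocates a new cell large enough for the payload and
   * duplicates command, circ_id, payload_len and the payload bytes. */
  copy = var_cell_copy(original);

  var_cell_free(original);     /* the copy is independent of the original */
  return copy;                 /* caller must var_cell_free() this later */
}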
src/test/test_channel.c

@@ -25,7 +25,9 @@ extern uint64_t estimated_total_queue_size;

static int test_chan_accept_cells = 0;
static int test_chan_fixed_cells_recved = 0;
+static cell_t * test_chan_last_seen_fixed_cell_ptr = NULL;
static int test_chan_var_cells_recved = 0;
+static var_cell_t * test_chan_last_seen_var_cell_ptr = NULL;
static int test_cells_written = 0;
static int test_destroy_not_pending_calls = 0;
static int test_doesnt_want_writes_count = 0;
@@ -70,6 +72,7 @@ static void test_channel_flushmux(void *arg);
static void test_channel_incoming(void *arg);
static void test_channel_lifecycle(void *arg);
static void test_channel_multi(void *arg);
+static void test_channel_queue_incoming(void *arg);
static void test_channel_queue_size(void *arg);
static void test_channel_write(void *arg);

@@ -179,7 +182,7 @@ chan_test_cell_handler(channel_t *ch,
  tt_assert(ch);
  tt_assert(cell);

- tor_free(cell);
+ test_chan_last_seen_fixed_cell_ptr = cell;
  ++test_chan_fixed_cells_recved;

 done:
@@ -214,7 +217,7 @@ chan_test_var_cell_handler(channel_t *ch,
  tt_assert(ch);
  tt_assert(var_cell);

- tor_free(var_cell);
+ test_chan_last_seen_var_cell_ptr = var_cell;
  ++test_chan_var_cells_recved;

 done:
@@ -608,7 +611,7 @@ test_channel_dumpstats(void *arg)
  make_fake_cell(cell);
  old_count = test_chan_fixed_cells_recved;
  channel_queue_cell(ch, cell);
- cell = NULL;
+ tor_free(cell);
  tt_int_op(test_chan_fixed_cells_recved, ==, old_count + 1);
  tt_assert(ch->n_bytes_recved > 0);
  tt_assert(ch->n_cells_recved > 0);
@@ -819,7 +822,7 @@ test_channel_incoming(void *arg)
  make_fake_cell(cell);
  old_count = test_chan_fixed_cells_recved;
  channel_queue_cell(ch, cell);
- cell = NULL;
+ tor_free(cell);
  tt_int_op(test_chan_fixed_cells_recved, ==, old_count + 1);

  /* Receive a variable-size cell */
@@ -827,7 +830,7 @@ test_channel_incoming(void *arg)
  make_fake_var_cell(var_cell);
  old_count = test_chan_var_cells_recved;
  channel_queue_var_cell(ch, var_cell);
- var_cell = NULL;
+ tor_free(cell);
  tt_int_op(test_chan_var_cells_recved, ==, old_count + 1);

  /* Close it */
@@ -1422,6 +1425,113 @@ test_channel_queue_impossible(void *arg)
  return;
}

+static void
+test_channel_queue_incoming(void *arg)
+{
+  channel_t *ch = NULL;
+  cell_t *cell = NULL;
+  var_cell_t *var_cell = NULL;
+  int old_fixed_count, old_var_count;
+
+  (void)arg;
+
+  /* Mock these for duration of the test */
+  MOCK(scheduler_channel_doesnt_want_writes,
+       scheduler_channel_doesnt_want_writes_mock);
+  MOCK(scheduler_release_channel,
+       scheduler_release_channel_mock);
+
+  /* Accept cells to lower layer */
+  test_chan_accept_cells = 1;
+  /* Use default overhead factor */
+  test_overhead_estimate = 1.0f;
+
+  ch = new_fake_channel();
+  tt_assert(ch);
+  /* Start it off in OPENING */
+  ch->state = CHANNEL_STATE_OPENING;
+  /* We'll need a cmux */
+  ch->cmux = circuitmux_alloc();
+
+  /* Test cell handler getters */
+  tt_ptr_op(channel_get_cell_handler(ch), ==, NULL);
+  tt_ptr_op(channel_get_var_cell_handler(ch), ==, NULL);
+
+  /* Try to register it */
+  channel_register(ch);
+  tt_assert(ch->registered);
+
+  /* Open it */
+  channel_change_state(ch, CHANNEL_STATE_OPEN);
+  tt_int_op(ch->state, ==, CHANNEL_STATE_OPEN);
+
+  /* Assert that the incoming queue is empty */
+  tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
+
+  /* Queue an incoming fixed-length cell */
+  cell = tor_malloc_zero(sizeof(cell_t));
+  make_fake_cell(cell);
+  channel_queue_cell(ch, cell);
+
+  /* Assert that the incoming queue has one entry */
+  tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), ==, 1);
+
+  /* Queue an incoming var cell */
+  var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
+  make_fake_var_cell(var_cell);
+  channel_queue_var_cell(ch, var_cell);
+
+  /* Assert that the incoming queue has two entries */
+  tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), ==, 2);
+
+  /*
+   * Install cell handlers; this will drain the queue, so save the old
+   * cell counters first
+   */
+  old_fixed_count = test_chan_fixed_cells_recved;
+  old_var_count = test_chan_var_cells_recved;
+  channel_set_cell_handlers(ch,
+                            chan_test_cell_handler,
+                            chan_test_var_cell_handler);
+  tt_ptr_op(channel_get_cell_handler(ch), ==, chan_test_cell_handler);
+  tt_ptr_op(channel_get_var_cell_handler(ch), ==, chan_test_var_cell_handler);
+
+  /* Assert cells were received */
+  tt_int_op(test_chan_fixed_cells_recved, ==, old_fixed_count + 1);
+  tt_int_op(test_chan_var_cells_recved, ==, old_var_count + 1);
+
+  /*
+   * Assert that the pointers are different from the cells we allocated;
+   * when queueing cells with no incoming cell handlers installed, the
+   * channel layer should copy them to a new buffer, and free them after
+   * delivery. These pointers will have already been freed by the time
+   * we get here, so don't dereference them.
+   */
+  tt_ptr_op(test_chan_last_seen_fixed_cell_ptr, !=, cell);
+  tt_ptr_op(test_chan_last_seen_var_cell_ptr, !=, var_cell);
+
+  /* Assert queue is now empty */
+  tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
+
+  /* Close it; this contains an assertion that the incoming queue is empty */
+  channel_mark_for_close(ch);
+  tt_int_op(ch->state, ==, CHANNEL_STATE_CLOSING);
+  chan_test_finish_close(ch);
+  tt_int_op(ch->state, ==, CHANNEL_STATE_CLOSED);
+  channel_run_cleanup();
+  ch = NULL;
+
+ done:
+  free_fake_channel(ch);
+  tor_free(cell);
+  tor_free(var_cell);
+
+  UNMOCK(scheduler_channel_doesnt_want_writes);
+  UNMOCK(scheduler_release_channel);
+
+  return;
+}
+
static void
test_channel_queue_size(void *arg)
{
@@ -1666,6 +1776,7 @@ struct testcase_t channel_tests[] = {
  { "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
  { "multi", test_channel_multi, TT_FORK, NULL, NULL },
  { "queue_impossible", test_channel_queue_impossible, TT_FORK, NULL, NULL },
+ { "queue_incoming", test_channel_queue_incoming, TT_FORK, NULL, NULL },
  { "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
  { "write", test_channel_write, TT_FORK, NULL, NULL },
  END_OF_TESTCASES