major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves are spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 09:02:24 +00:00
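
A rough illustration of the window bookkeeping described above. The struct, field,
and constant names below are assumptions for illustration only, not code from this
change: each data cell arriving on a topic decrements that topic's receive window,
and once enough cells have arrived the edge sends a topic-level sendme so the far
side may keep sending; the circuit-level windows at each hop follow the same
pattern with their own counters.

#define TOPIC_WINDOW_START     500 /* assumed initial receive window */
#define TOPIC_WINDOW_INCREMENT 50  /* assumed cells acknowledged per sendme */

struct topic_window_sketch {
  int deliver_window; /* data cells the far edge may still send us */
  int package_window; /* data cells we may still send the far edge */
};

/* Call for each data cell that arrives on this topic at this edge.
 * Returns 1 if the caller should emit a topic-level sendme now. */
static int topic_received_cell_sketch(struct topic_window_sketch *t) {
  t->deliver_window--;
  if(t->deliver_window <= TOPIC_WINDOW_START - TOPIC_WINDOW_INCREMENT) {
    t->deliver_window += TOPIC_WINDOW_INCREMENT;
    return 1;
  }
  return 0;
}

/* Call when a topic-level sendme arrives from the far edge: we may now
 * package TOPIC_WINDOW_INCREMENT more data cells on this topic. */
static void topic_received_sendme_sketch(struct topic_window_sketch *t) {
  t->package_window += TOPIC_WINDOW_INCREMENT;
}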

/* Copyright 2003 Roger Dingledine. */
/* See LICENSE for licensing information */
/* $Id$ */

/* See http://elvin.dstc.com/ListArchive/elvin-dev/archive/2001/09/msg00027.html
 * for some approaches to asynchronous dns. We will want to switch once one of
 * them becomes more commonly available.
 */

#include "or.h"
#include "tree.h"

#define MAX_ADDRESSLEN 256

#define MAX_DNSWORKERS 50
#define MIN_DNSWORKERS 3
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 09:02:24 +00:00
|
|
|
|
2003-06-17 14:31:05 +00:00
|
|
|
int num_workers=0;
|
|
|
|
int num_workers_busy=0;

static int dns_assign_to_worker(connection_t *exitconn);
static int dns_found_answer(char *question, uint32_t answer);
static void dnsworker_main(int fd);
static int dns_spawn_worker(void);
static void spawn_enough_workers(void);

struct pending_connection_t {
  struct connection_t *conn;
  struct pending_connection_t *next;
};

struct cached_resolve {
  SPLAY_ENTRY(cached_resolve) node;
  char question[MAX_ADDRESSLEN]; /* the hostname to be resolved */
  uint32_t answer; /* in host order. I know I'm horrible for assuming ipv4 */
  char state; /* 0 is pending; 1 means answer is valid; 2 means resolve failed */
#define CACHE_STATE_PENDING 0
#define CACHE_STATE_VALID 1
#define CACHE_STATE_FAILED 2
  uint32_t expire; /* remove untouched items from cache after some time? */
  struct pending_connection_t *pending_connections;
  struct cached_resolve *next;
};

SPLAY_HEAD(cache_tree, cached_resolve) cache_root;

static int compare_cached_resolves(struct cached_resolve *a, struct cached_resolve *b) {
  /* make this smarter one day? */
  return strncasecmp(a->question, b->question, MAX_ADDRESSLEN);
}

SPLAY_PROTOTYPE(cache_tree, cached_resolve, node, compare_cached_resolves);
SPLAY_GENERATE(cache_tree, cached_resolve, node, compare_cached_resolves);

static void init_cache_tree(void) {
  SPLAY_INIT(&cache_root);
}

void dns_init(void) {
  init_cache_tree();
  spawn_enough_workers();
}

/* See if the question 'exitconn->address' has been answered. if so,
 * if resolve valid, put it into exitconn->addr and exec to
 * connection_exit_connect. If resolve failed, return -1.
 *
 * Else, if seen before and pending, add conn to the pending list,
 * and return 0.
 *
 * Else, if not seen before, add conn to pending list, hand to
 * dns farm, and return 0.
 */
int dns_resolve(connection_t *exitconn) {
  struct cached_resolve *resolve;
  struct cached_resolve search;
  struct pending_connection_t *pending_connection;

  strncpy(search.question, exitconn->address, MAX_ADDRESSLEN);

  /* check the tree to see if 'question' is already there. */
  resolve = SPLAY_FIND(cache_tree, &cache_root, &search);
  if(resolve) { /* already there */
    switch(resolve->state) {
      case CACHE_STATE_PENDING:
        /* add us to the pending list */
        pending_connection = tor_malloc(sizeof(struct pending_connection_t));
        pending_connection->conn = exitconn;
        pending_connection->next = resolve->pending_connections;
        resolve->pending_connections = pending_connection;
        return 0;
      case CACHE_STATE_VALID:
        exitconn->addr = resolve->answer;
        return connection_exit_connect(exitconn);
      case CACHE_STATE_FAILED:
        return -1;
    }
  } else { /* need to add it */
    resolve = tor_malloc(sizeof(struct cached_resolve));
    memset(resolve, 0, sizeof(struct cached_resolve));
    resolve->state = CACHE_STATE_PENDING;
    strncpy(resolve->question, exitconn->address, MAX_ADDRESSLEN);

    /* add us to the pending list */
    pending_connection = tor_malloc(sizeof(struct pending_connection_t));
    pending_connection->conn = exitconn;
    pending_connection->next = resolve->pending_connections;
    resolve->pending_connections = pending_connection;

    SPLAY_INSERT(cache_tree, &cache_root, resolve);
    return dns_assign_to_worker(exitconn);
  }

  assert(0);
  return 0; /* not reached; keep gcc happy */
}
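
#if 0
/* Illustrative only, not part of this change: a hypothetical exit-side caller
 * showing how dns_resolve() is meant to be used. A negative return means the
 * resolve is already known to have failed (or the worker handoff or connect
 * failed), so the caller should close the connection; otherwise the conn is
 * either connecting already or parked on the pending list until
 * dns_found_answer() fires. The function name here is an assumption. */
static void handle_exit_begin_sketch(connection_t *exitconn) {
  if(dns_resolve(exitconn) < 0) {
    log(LOG_INFO,"handle_exit_begin_sketch(): resolve failed for '%s'.", exitconn->address);
    exitconn->marked_for_close = 1;
  }
}
#endif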

static int dns_assign_to_worker(connection_t *exitconn) {
  connection_t *dnsconn;
  unsigned char len;
  struct hostent *rent;

  spawn_enough_workers(); /* respawn here, to be sure there are enough */

  dnsconn = connection_get_by_type_state(CONN_TYPE_DNSWORKER, DNSWORKER_STATE_IDLE);

  if(!dnsconn) {
    log(LOG_INFO,"dns_assign_to_worker(): no idle dns workers. Doing it myself.");

    /* short version which does it all right here */
    rent = gethostbyname(exitconn->address);
    if (!rent) {
      return dns_found_answer(exitconn->address, 0);
    }
    return dns_found_answer(exitconn->address, *(uint32_t *)rent->h_addr);
  }

  dnsconn->address = strdup(exitconn->address);
  dnsconn->state = DNSWORKER_STATE_BUSY;
  num_workers_busy++;

  len = strlen(dnsconn->address);
  /* FFFF we should have it retry if the first worker bombs out */
  if(connection_write_to_buf(&len, 1, dnsconn) < 0 ||
     connection_write_to_buf(dnsconn->address, len, dnsconn) < 0) {
    log(LOG_NOTICE,"dns_assign_to_worker(): Write failed. Closing worker and failing resolve.");
    dnsconn->marked_for_close = 1;
    return -1;
  }

//  log(LOG_DEBUG,"dns_assign_to_worker(): submitted '%s'", exitconn->address);
  return 0;
}

static int dns_found_answer(char *question, uint32_t answer) {
  struct pending_connection_t *pend;
  struct cached_resolve search;
  struct cached_resolve *resolve;

  strncpy(search.question, question, MAX_ADDRESSLEN);

  resolve = SPLAY_FIND(cache_tree, &cache_root, &search);
  if(!resolve) {
    log(LOG_ERR,"dns_found_answer(): Answer to unasked question '%s'? Dropping.", question);
    return 0;
  }

  /* XXX this is a bug which hasn't been found yet. Probably something
   * about slaves answering questions when they're not supposed to, and
   * reusing the old question. Check for it here rather than asserting,
   * so a stray answer can't take the whole process down.
   */
  if(resolve->state != CACHE_STATE_PENDING) {
    log(LOG_ERR,"dns_found_answer(): BUG: resolve '%s' in state %d (not pending). Dropping.",question, resolve->state);
    return 0;
  }

  resolve->answer = ntohl(answer);
  if(resolve->answer)
    resolve->state = CACHE_STATE_VALID;
  else
    resolve->state = CACHE_STATE_FAILED;

  while(resolve->pending_connections) {
    pend = resolve->pending_connections;
    pend->conn->addr = resolve->answer;
    if(resolve->state == CACHE_STATE_FAILED || connection_exit_connect(pend->conn) < 0) {
      pend->conn->marked_for_close = 1;
    }
    resolve->pending_connections = pend->next;
    free(pend);
  }
  return 0;
}

/******************************************************************/

int connection_dns_finished_flushing(connection_t *conn) {
  assert(conn && conn->type == CONN_TYPE_DNSWORKER);
  connection_stop_writing(conn);
  return 0;
}

int connection_dns_process_inbuf(connection_t *conn) {
  uint32_t answer;

  assert(conn && conn->type == CONN_TYPE_DNSWORKER);

  if(conn->inbuf_reached_eof) {
    log(LOG_ERR,"connection_dns_process_inbuf(): Read eof. Worker dying.");
    /* XXX if the dns request is pending, go through and either repeat or mark it failed */
    return -1;
  }

  assert(conn->state == DNSWORKER_STATE_BUSY);
  if(conn->inbuf_datalen < 4) /* entire answer available? */
    return 0; /* not yet */
  assert(conn->inbuf_datalen == 4);

  if(connection_fetch_from_buf((char*)&answer,sizeof(answer),conn) < 0) {
    log(LOG_ERR,"connection_dns_process_inbuf(): Broken inbuf. Worker dying.");
    /* XXX exitconn's never going to get his answer :( */
    return -1;
  }

  dns_found_answer(conn->address, answer);

  free(conn->address);
  conn->address = NULL;
  conn->state = DNSWORKER_STATE_IDLE;
  num_workers_busy--;

  return 0;
}

static void dnsworker_main(int fd) {
  char question[MAX_ADDRESSLEN];
  unsigned char question_len;
  struct hostent *rent;

  for(;;) {

    if(read(fd, &question_len, 1) != 1) {
      log(LOG_INFO,"dnsworker_main(): read length failed. Exiting.");
      exit(0);
    }
    assert(question_len > 0);

    if(read(fd, question, question_len) != question_len) {
      log(LOG_INFO,"dnsworker_main(): read hostname failed. Exiting.");
      exit(0);
    }
    question[question_len] = 0; /* null terminate it */

    rent = gethostbyname(question);
    if (!rent) {
      log(LOG_INFO,"dnsworker_main(): Could not resolve dest addr %s. Returning nulls.",question);
      /* XXX it's conceivable write could return 1 through 3. but that's never gonna happen, right? */
      if(write(fd, "\0\0\0\0", 4) != 4) {
        log(LOG_INFO,"dnsworker_main(): writing nulls failed. Exiting.");
        exit(0);
      }
    } else {
      assert(rent->h_length == 4); /* break to remind us if we move away from ipv4 */
      if(write(fd, rent->h_addr, 4) != 4) {
        log(LOG_INFO,"dnsworker_main(): writing answer failed. Exiting.");
        exit(0);
      }
      log(LOG_INFO,"dnsworker_main(): Answered question '%s'.",question);
    }
  }
}
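
#if 0
/* Standalone sketch, not built into tor: shows the worker wire protocol that
 * dnsworker_main() above speaks. The parent sends one length byte followed by
 * the hostname, and reads back a raw four-byte IPv4 address, where all zeroes
 * means the resolve failed. The main() harness and hostname here are
 * assumptions for illustration; in tor itself dns_spawn_worker() and
 * dns_assign_to_worker() play these roles over a non-blocking connection_t. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>

int main(void) {
  int fd[2];
  unsigned char len;
  unsigned char answer[4];
  const char *question = "www.example.com";

  if(socketpair(AF_UNIX, SOCK_STREAM, 0, fd) < 0)
    return 1;
  if(fork() == 0) {      /* child: loop forever answering questions */
    close(fd[0]);
    dnsworker_main(fd[1]);
  }
  close(fd[1]);

  len = strlen(question);
  write(fd[0], &len, 1);           /* frame: one length byte ... */
  write(fd[0], question, len);     /* ... then the hostname itself */
  if(read(fd[0], answer, 4) == 4)  /* reply: 4 raw address bytes */
    printf("%d.%d.%d.%d\n", answer[0], answer[1], answer[2], answer[3]);
  return 0;
}
#endif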

static int dns_spawn_worker(void) {
  pid_t pid;
  int fd[2];
  connection_t *conn;

  if(socketpair(AF_UNIX, SOCK_STREAM, 0, fd) < 0) {
    perror("socketpair");
    exit(1);
  }

  pid = fork();
  if(pid < 0) {
    perror("fork");
    exit(1);
  }
  if(pid == 0) { /* i'm the child */
    close(fd[0]);
    dnsworker_main(fd[1]);
    assert(0); /* never gets here */
  }

  /* i'm the parent */
  log(LOG_DEBUG,"dns_spawn_worker(): just spawned a worker.");
  close(fd[1]);

  conn = connection_new(CONN_TYPE_DNSWORKER);
  if(!conn) {
    close(fd[0]);
    return -1;
  }

  fcntl(fd[0], F_SETFL, O_NONBLOCK); /* set it to non-blocking */

  /* set up conn so it's got all the data we need to remember */
  conn->receiver_bucket = -1; /* non-cell connections don't do receiver buckets */
  conn->bandwidth = -1;
  conn->s = fd[0];

  if(connection_add(conn) < 0) { /* no space, forget it */
    log(LOG_INFO,"dns_spawn_worker(): connection_add failed. Giving up.");
    connection_free(conn); /* this closes fd[0] */
    return -1;
  }

  conn->state = DNSWORKER_STATE_IDLE;
  connection_start_reading(conn);

  return 0; /* success */
}

static void spawn_enough_workers(void) {
  int num_workers_needed; /* aim to have 1 more than needed,
                           * but no less than min and no more than max */

  if(num_workers_busy >= MIN_DNSWORKERS)
    num_workers_needed = num_workers_busy+1;
  else
    num_workers_needed = MIN_DNSWORKERS;

  if(num_workers_needed >= MAX_DNSWORKERS)
    num_workers_needed = MAX_DNSWORKERS;

  while(num_workers < num_workers_needed) {
    if(dns_spawn_worker() < 0) {
      log(LOG_ERR,"spawn_enough_workers(): spawn failed!");
      return;
    }
    num_workers++;
  }

  /* FFFF this is where we will cull extra workers */
}

/*
  Local Variables:
  mode:c
  indent-tabs-mode:nil
  c-basic-offset:2
  End:
*/