Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* Copyright 2001,2002 Roger Dingledine, Matej Pfajfar. */
|
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
/* $Id$ */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* buffers.c */
|
|
|
|
|
|
|
|
#include "or.h"
|
|
|
|
|
2002-08-23 08:49:43 +02:00
|
|
|
extern or_options_t options; /* command-line and config-file options */
|
|
|
|
|
2003-03-04 05:36:37 +01:00
|
|
|
/* Create a new buf of size MAX_BUF_SIZE. Write a pointer to it
|
|
|
|
* into *buf, write MAX_BUF_SIZE into *buflen, and initialize
|
2003-05-20 08:41:23 +02:00
|
|
|
* *buf_datalen to 0. Return 0.
|
2003-03-04 05:36:37 +01:00
|
|
|
*/
|
2002-08-24 06:59:21 +02:00
|
|
|
int buf_new(char **buf, int *buflen, int *buf_datalen) {
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-06-30 09:37:49 +02:00
|
|
|
assert(buf && buflen && buf_datalen);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-05-20 08:41:23 +02:00
|
|
|
*buf = (char *)tor_malloc(MAX_BUF_SIZE);
|
2003-03-04 05:36:37 +01:00
|
|
|
// memset(*buf,0,MAX_BUF_SIZE);
|
2002-06-30 09:37:49 +02:00
|
|
|
*buflen = MAX_BUF_SIZE;
|
|
|
|
*buf_datalen = 0;
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2002-06-30 09:37:49 +02:00
|
|
|
/* Release the storage that buf_new() allocated for 'buf'. */
void buf_free(char *buf) {
  free(buf);
}
|
|
|
|
|
2003-07-05 09:10:34 +02:00
|
|
|
/* read from socket s, writing onto buf+buf_datalen.
|
2003-03-04 05:36:37 +01:00
|
|
|
* read at most 'at_most' bytes, and in any case don't read more than will fit based on buflen.
|
|
|
|
* If read() returns 0, set *reached_eof to 1 and return 0. If you want to tear
|
|
|
|
* down the connection return -1, else return the number of bytes read.
|
|
|
|
*/
|
2002-08-24 06:59:21 +02:00
|
|
|
int read_to_buf(int s, int at_most, char **buf, int *buflen, int *buf_datalen, int *reached_eof) {
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
int read_result;
|
2003-08-14 19:13:52 +02:00
|
|
|
#ifdef MS_WINDOWS
|
|
|
|
int e;
|
|
|
|
#endif
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-07-18 08:37:58 +02:00
|
|
|
assert(buf && *buf && buflen && buf_datalen && reached_eof && (s>=0));
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* this is the point where you would grow the buffer, if you want to */
|
|
|
|
|
2003-07-05 09:10:34 +02:00
|
|
|
if(at_most > *buflen - *buf_datalen)
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
at_most = *buflen - *buf_datalen; /* take the min of the two */
|
|
|
|
|
|
|
|
if(at_most == 0)
|
|
|
|
return 0; /* we shouldn't read anything */
|
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
// log_fn(LOG_DEBUG,"reading at most %d bytes.",at_most);
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
read_result = read(s, *buf+*buf_datalen, at_most);
|
2002-06-27 00:45:49 +02:00
|
|
|
if (read_result < 0) {
|
2003-08-14 19:13:52 +02:00
|
|
|
if(!ERRNO_EAGAIN(errno)) { /* it's a real error */
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2003-08-14 19:13:52 +02:00
|
|
|
#ifdef MS_WINDOWS
|
|
|
|
e = correct_socket_errno(s);
|
2003-09-09 01:10:24 +02:00
|
|
|
if(!ERRNO_EAGAIN(e)) { /* no, it *is* a real error! */
|
2003-08-14 19:13:52 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
#endif
|
2002-06-27 00:45:49 +02:00
|
|
|
return 0;
|
|
|
|
} else if (read_result == 0) {
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Encountered eof");
|
2002-06-30 09:37:49 +02:00
|
|
|
*reached_eof = 1;
|
2002-06-27 00:45:49 +02:00
|
|
|
return 0;
|
|
|
|
} else { /* we read some bytes */
|
2002-06-30 09:37:49 +02:00
|
|
|
*buf_datalen += read_result;
|
2003-06-18 00:18:26 +02:00
|
|
|
// log_fn(LOG_DEBUG,"Read %d bytes. %d on inbuf.",read_result, *buf_datalen);
|
2002-06-27 00:45:49 +02:00
|
|
|
return read_result;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-09-04 18:05:08 +02:00
|
|
|
/* Read up to 'at_most' bytes from the TLS connection onto the end of
 * the buffer (*buf+*buf_datalen), clipped to the free space remaining.
 * Returns tor_tls_read's result unchanged on error; on success, accounts
 * the bytes into *buf_datalen and returns the count read.
 */
int read_to_buf_tls(tor_tls *tls, int at_most, char **buf, int *buflen, int *buf_datalen) {
  int r;
  /* Bug fix: check 'buf' itself before dereferencing it as *buf. */
  assert(tls && buf && *buf && buflen && buf_datalen);

  if (at_most > *buflen - *buf_datalen)
    at_most = *buflen - *buf_datalen; /* don't read past the buffer's end */

  if (at_most == 0)
    return 0; /* no room to read anything */

  r = tor_tls_read(tls, *buf+*buf_datalen, at_most);
  if (r<0)
    return r; /* propagate the TLS layer's error code */
  *buf_datalen += r;
  return r;
}
|
|
|
|
|
2002-08-24 06:59:21 +02:00
|
|
|
int flush_buf(int s, char **buf, int *buflen, int *buf_flushlen, int *buf_datalen) {
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* push from buf onto s
|
|
|
|
* then memmove to front of buf
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
* return -1 or how many bytes remain to be flushed */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
int write_result;
|
2003-08-14 19:13:52 +02:00
|
|
|
#ifdef MS_WINDOWS
|
|
|
|
int e;
|
|
|
|
#endif
|
2002-06-27 00:45:49 +02:00
|
|
|
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
assert(buf && *buf && buflen && buf_flushlen && buf_datalen && (s>=0) && (*buf_flushlen <= *buf_datalen));
|
2002-06-27 00:45:49 +02:00
|
|
|
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
if(*buf_flushlen == 0) /* nothing to flush */
|
2002-06-27 00:45:49 +02:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* this is the point where you would grow the buffer, if you want to */
|
|
|
|
|
2002-07-16 04:12:58 +02:00
|
|
|
write_result = write(s, *buf, *buf_flushlen);
|
2002-06-27 00:45:49 +02:00
|
|
|
if (write_result < 0) {
|
2003-08-14 19:13:52 +02:00
|
|
|
if(!ERRNO_EAGAIN(errno)) { /* it's a real error */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
#ifdef MS_WINDOWS
|
|
|
|
e = correct_socket_errno(s);
|
2003-09-09 01:10:24 +02:00
|
|
|
if(!ERRNO_EAGAIN(e)) { /* no, it *is* a real error! */
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2003-08-14 19:13:52 +02:00
|
|
|
#endif
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"write() would block, returning.");
|
2002-06-27 00:45:49 +02:00
|
|
|
return 0;
|
|
|
|
} else {
|
2002-06-30 09:37:49 +02:00
|
|
|
*buf_datalen -= write_result;
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
*buf_flushlen -= write_result;
|
2002-06-30 09:37:49 +02:00
|
|
|
memmove(*buf, *buf+write_result, *buf_datalen);
|
2003-09-18 10:11:31 +02:00
|
|
|
log_fn(LOG_DEBUG,"%d: flushed %d bytes, %d ready to flush, %d remain.",
|
|
|
|
s,write_result,*buf_flushlen,*buf_datalen);
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
return *buf_flushlen;
|
2003-09-07 12:24:40 +02:00
|
|
|
/* XXX USE_TLS should change to return write_result like any sane function would */
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-09-04 18:05:08 +02:00
|
|
|
/* Push up to *buf_flushlen bytes from *buf out over the TLS connection,
 * then memmove the remainder to the front of the buffer.
 * Returns tor_tls_write's (possibly negative) result; on success the
 * written bytes are removed from *buf_datalen and *buf_flushlen.
 */
int flush_buf_tls(tor_tls *tls, char **buf, int *buflen, int *buf_flushlen, int *buf_datalen)
{
  int r;
  /* Bug fix: also check buf (dereferenced as *buf) and buf_flushlen
   * (dereferenced below), which the original assert omitted. */
  assert(tls && buf && *buf && buflen && buf_flushlen && buf_datalen);

  /* we want to let tls write even if flushlen is zero, because it might
   * have a partial record pending */
  r = tor_tls_write(tls, *buf, *buf_flushlen);
  if (r < 0) {
    return r; /* propagate the TLS layer's error code */
  }
  *buf_datalen -= r;
  *buf_flushlen -= r;
  memmove(*buf, *buf+r, *buf_datalen);
  return r;
}
|
|
|
|
|
2002-08-24 06:59:21 +02:00
|
|
|
int write_to_buf(char *string, int string_len,
|
|
|
|
char **buf, int *buflen, int *buf_datalen) {
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* append string to buf (growing as needed, return -1 if "too big")
|
|
|
|
* return total number of bytes on the buf
|
|
|
|
*/
|
|
|
|
|
2002-06-30 09:37:49 +02:00
|
|
|
assert(string && buf && *buf && buflen && buf_datalen);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* this is the point where you would grow the buffer, if you want to */
|
|
|
|
|
2002-06-30 09:37:49 +02:00
|
|
|
if (string_len + *buf_datalen > *buflen) { /* we're out of luck */
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG, "buflen too small. Time to implement growing dynamic bufs.");
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2002-06-30 09:37:49 +02:00
|
|
|
memcpy(*buf+*buf_datalen, string, string_len);
|
|
|
|
*buf_datalen += string_len;
|
2003-06-18 00:18:26 +02:00
|
|
|
// log_fn(LOG_DEBUG,"added %d bytes to buf (now %d total).",string_len, *buf_datalen);
|
2002-06-30 09:37:49 +02:00
|
|
|
return *buf_datalen;
|
2003-03-17 03:42:45 +01:00
|
|
|
}
|
|
|
|
|
2002-08-24 06:59:21 +02:00
|
|
|
/* Move the first string_len bytes of the buffer into 'string', then
 * memmove the rest of the buffer forward to close the gap (that is,
 * remove those bytes from the buf). The caller guarantees at least
 * string_len bytes are present.
 * Return the number of bytes still on the buffer. */
int fetch_from_buf(char *string, int string_len,
                   char **buf, int *buflen, int *buf_datalen) {
  char *data;

  assert(string && buf && *buf && buflen && buf_datalen);
  assert(string_len <= *buf_datalen); /* make sure we don't ask for too much */

  data = *buf;
  memcpy(string, data, string_len);
  *buf_datalen -= string_len;
  memmove(data, data+string_len, *buf_datalen);
  return *buf_datalen;
}
|
|
|
|
|
2003-09-17 22:09:06 +02:00
|
|
|
/* There is a (possibly incomplete) http statement on *buf, of the
 * form "%s\r\n\r\n%s", headers, body.
 * If a) the headers include a Content-Length field and all bytes in
 * the body are present, or b) there's no Content-Length field and
 * all headers are present, then:
 *   copy headers and body into the supplied args (and null terminate
 *   them), remove them from buf, and return 1.
 *   (If headers or body is NULL, discard that part of the buf.)
 *   If a headers or body doesn't fit in the arg, return -1.
 *
 * Else, change nothing and return 0.
 */
int fetch_from_buf_http(char *buf, int *buf_datalen,
                        char *headers_out, int max_headerlen,
                        char *body_out, int max_bodylen) {
  char *headers, *body;
  int i;
  int headerlen, bodylen, contentlen;

  assert(buf && buf_datalen);

  headers = buf;
  /* find_on_inbuf returns the offset just PAST the end of the match,
   * or -1 when the needle isn't present */
  i = find_on_inbuf("\r\n\r\n", 4, buf, *buf_datalen);
  if(i < 0) {
    log_fn(LOG_DEBUG,"headers not all here yet.");
    return 0;
  }
  body = buf+i; /* first byte after the blank line */
  headerlen = body-headers; /* includes the CRLFCRLF */
  bodylen = *buf_datalen - headerlen;
  log_fn(LOG_DEBUG,"headerlen %d, bodylen %d.",headerlen,bodylen);

  /* reject early if either part can't fit its output buffer
   * (<= leaves room for the NUL terminator appended below) */
  if(headers_out && max_headerlen <= headerlen) {
    log_fn(LOG_DEBUG,"headerlen %d larger than %d. Failing.", headerlen, max_headerlen-1);
    return -1;
  }
  if(body_out && max_bodylen <= bodylen) {
    log_fn(LOG_DEBUG,"bodylen %d larger than %d. Failing.", bodylen, max_bodylen-1);
    return -1;
  }

#define CONTENT_LENGTH "\r\nContent-Length: "
  /* search only the header section for a Content-Length field */
  i = find_on_inbuf(CONTENT_LENGTH, strlen(CONTENT_LENGTH), headers, headerlen);
  if(i > 0) {
    /* i points just past "Content-Length: ", i.e. at the digits.
     * NOTE(review): atoi can't distinguish 0 from a parse failure and
     * ignores trailing garbage — consider strtol here. */
    contentlen = atoi(headers+i);
    if(bodylen < contentlen) {
      log_fn(LOG_DEBUG,"body not all here yet.");
      return 0; /* not all there yet */
    }
    /* any bytes past contentlen stay on the buf for the next request */
    bodylen = contentlen;
    log_fn(LOG_DEBUG,"bodylen reduced to %d.",bodylen);
  }
  /* all happy. copy into the appropriate places, and return 1 */
  if(headers_out) {
    memcpy(headers_out,buf,headerlen);
    headers_out[headerlen] = 0; /* null terminate it */
  }
  if(body_out) {
    memcpy(body_out,buf+headerlen,bodylen);
    body_out[bodylen] = 0; /* null terminate it */
  }
  /* strip the consumed statement off the front of the buffer */
  *buf_datalen -= (headerlen+bodylen);
  memmove(buf, buf+headerlen+bodylen, *buf_datalen);

  return 1;
}
|
|
|
|
|
2003-09-18 10:11:31 +02:00
|
|
|
/* There is a (possibly incomplete) socks handshake on *buf, of the
|
|
|
|
* forms
|
|
|
|
* socks4: "socksheader || username\0".
|
|
|
|
* socks4a: "socksheader || username\0 || destaddr\0".
|
|
|
|
* If it's a complete and valid handshake, and destaddr fits in addr_out,
|
|
|
|
* then pull the handshake off the buf, assign to addr_out and port_out,
|
|
|
|
* and return 1.
|
|
|
|
* If it's invalid or too big, return -1.
|
2003-09-21 08:15:43 +02:00
|
|
|
* Else it's not all there yet, change nothing and return 0.
|
2003-09-18 10:11:31 +02:00
|
|
|
*/
|
|
|
|
int fetch_from_buf_socks(char *buf, int *buf_datalen,
|
|
|
|
char *addr_out, int max_addrlen,
|
|
|
|
uint16_t *port_out) {
|
2003-09-21 08:15:43 +02:00
|
|
|
socks4_t socks4_info;
|
|
|
|
char *tmpbuf=NULL;
|
2003-09-18 10:11:31 +02:00
|
|
|
uint16_t port;
|
|
|
|
enum {socks4, socks4a } socks_prot = socks4a;
|
|
|
|
char *next, *startaddr;
|
|
|
|
|
|
|
|
if(*buf_datalen < sizeof(socks4_t)) /* basic info available? */
|
|
|
|
return 0; /* not yet */
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
/* an inlined socks4_unpack() */
|
|
|
|
socks4_info.version = *buf;
|
|
|
|
socks4_info.command = *(buf+1);
|
|
|
|
socks4_info.destport = *(uint16_t*)(buf+2);
|
|
|
|
socks4_info.destip = *(uint32_t*)(buf+4);
|
2003-09-18 10:11:31 +02:00
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
if(socks4_info.version != 4) {
|
|
|
|
log_fn(LOG_NOTICE,"Unrecognized version %d.",socks4_info.version);
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
if(socks4_info.command != 1) { /* not a connect? we don't support it. */
|
|
|
|
log_fn(LOG_NOTICE,"command %d not '1'.",socks4_info.command);
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
port = ntohs(socks4_info.destport);
|
2003-09-18 10:11:31 +02:00
|
|
|
if(!port) {
|
|
|
|
log_fn(LOG_NOTICE,"Port is zero.");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
if(!socks4_info.destip) {
|
|
|
|
log_fn(LOG_NOTICE,"DestIP is zero.");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(socks4_info.destip >> 8) {
|
|
|
|
struct in_addr in;
|
2003-09-18 10:11:31 +02:00
|
|
|
log_fn(LOG_NOTICE,"destip not in form 0.0.0.x.");
|
2003-09-21 08:15:43 +02:00
|
|
|
in.s_addr = htonl(socks4_info.destip);
|
|
|
|
tmpbuf = inet_ntoa(in);
|
2003-09-18 10:11:31 +02:00
|
|
|
if(max_addrlen <= strlen(tmpbuf)) {
|
2003-09-21 08:15:43 +02:00
|
|
|
log_fn(LOG_DEBUG,"socks4 addr too long.");
|
2003-09-18 10:11:31 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
log_fn(LOG_DEBUG,"Successfully read destip (%s)", tmpbuf);
|
|
|
|
socks_prot = socks4;
|
|
|
|
}
|
|
|
|
|
2003-09-21 08:15:43 +02:00
|
|
|
next = memchr(buf+SOCKS4_NETWORK_LEN, 0, *buf_datalen);
|
2003-09-18 10:11:31 +02:00
|
|
|
if(!next) {
|
|
|
|
log_fn(LOG_DEBUG,"Username not here yet.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
startaddr = next+1;
|
|
|
|
if(socks_prot == socks4a) {
|
|
|
|
next = memchr(startaddr, 0, buf+*buf_datalen-startaddr);
|
|
|
|
if(!next) {
|
|
|
|
log_fn(LOG_DEBUG,"Destaddr not here yet.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if(max_addrlen <= next-startaddr) {
|
|
|
|
log_fn(LOG_DEBUG,"Destaddr not here yet.");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
log_fn(LOG_DEBUG,"Everything is here. Success.");
|
|
|
|
*port_out = port;
|
|
|
|
strcpy(addr_out, socks_prot == socks4 ? tmpbuf : startaddr);
|
|
|
|
*buf_datalen -= (next-buf+1); /* next points to the final \0 on inbuf */
|
|
|
|
memmove(buf, next+1, *buf_datalen);
|
|
|
|
// log_fn(LOG_DEBUG,"buf_datalen is now %d:'%s'",*buf_datalen,buf);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2002-09-28 07:53:00 +02:00
|
|
|
/* Scan haystack 'buf' (buf_datalen bytes) for the first occurrence of
 * the needle 'string' (string_len bytes). On a hit, return the offset
 * from the beginning of buf to one-past-the-end of the match.
 * If it's not there, return -1.
 */
int find_on_inbuf(char *string, int string_len,
                  char *buf, int buf_datalen) {
  char *p;
  char *last_start = buf + buf_datalen - string_len; /* last spot a match can begin */

  assert(string && string_len > 0 && buf);

  if(buf_datalen < string_len)
    return -1; /* needle longer than haystack */

  for(p = buf; p <= last_start; p++) {
    /* cheap first-byte test before the full memcmp */
    if(*p == *string && memcmp(p+1, string+1, string_len-1) == 0)
      return (p - buf) + string_len;
  }

  return -1;
}
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
/*
|
|
|
|
Local Variables:
|
|
|
|
mode:c
|
|
|
|
indent-tabs-mode:nil
|
|
|
|
c-basic-offset:2
|
|
|
|
End:
|
|
|
|
*/
|