-rw-r--r--  include/ap_mmn.h                |  3
-rw-r--r--  modules/proxy/mod_proxy.h       | 22
-rw-r--r--  modules/proxy/mod_proxy_http.c  | 25
-rw-r--r--  modules/proxy/proxy_util.c      | 54
4 files changed, 18 insertions, 86 deletions
diff --git a/include/ap_mmn.h b/include/ap_mmn.h
index 792c8cd837..a2c40a1f72 100644
--- a/include/ap_mmn.h
+++ b/include/ap_mmn.h
@@ -284,12 +284,13 @@
* 20101106.1 (2.3.9-dev) Add ap_pool_cleanup_set_null() generic cleanup
* 20101106.2 (2.3.9-dev) Add suexec_disabled_reason field to ap_unixd_config
* 20101113.0 (2.3.9-dev) Add source address to mod_proxy.h
+ * 20101116.0 (2.3.9-dev) Remove ap_proxy_buckets_lifetime_transform()
*/
#define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
#ifndef MODULE_MAGIC_NUMBER_MAJOR
-#define MODULE_MAGIC_NUMBER_MAJOR 20101113
+#define MODULE_MAGIC_NUMBER_MAJOR 20101116
#endif
#define MODULE_MAGIC_NUMBER_MINOR 0 /* 0...n */
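
The major MMN bump above is what lets third-party modules detect this API removal at compile time. A minimal sketch of such a feature test (the HAVE_PROXY_LIFETIME_TRANSFORM macro is hypothetical, not part of this change):

    #include "ap_mmn.h"

    /* Hypothetical guard in an out-of-tree module: once the installed
     * headers report MMN 20101116.0 or later, the removed
     * ap_proxy_buckets_lifetime_transform() must not be referenced. */
    #if AP_MODULE_MAGIC_AT_LEAST(20101116, 0)
    #define HAVE_PROXY_LIFETIME_TRANSFORM 0
    #else
    #define HAVE_PROXY_LIFETIME_TRANSFORM 1
    #endif
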
diff --git a/modules/proxy/mod_proxy.h b/modules/proxy/mod_proxy.h
index 6da213cbca..20b818f17d 100644
--- a/modules/proxy/mod_proxy.h
+++ b/modules/proxy/mod_proxy.h
@@ -779,28 +779,6 @@ PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
apr_bucket_brigade *brigade);
/**
- * Transform buckets from one bucket allocator to another one by creating a
- * transient bucket for each data bucket and let it use the data read from
- * the old bucket. Metabuckets are transformed by just recreating them.
- * Attention: Currently only the following bucket types are handled:
- *
- * All data buckets
- * FLUSH
- * EOS
- *
- * If an other bucket type is found its type is logged as a debug message
- * and APR_EGENERAL is returned.
- * @param r current request record of client request. Only used for logging
- * purposes
- * @param from the brigade that contains the buckets to transform
- * @param to the brigade that will receive the transformed buckets
- * @return APR_SUCCESS if all buckets could be transformed APR_EGENERAL
- * otherwise
- */
-PROXY_DECLARE(apr_status_t)
-ap_proxy_buckets_lifetime_transform(request_rec *r, apr_bucket_brigade *from,
- apr_bucket_brigade *to);
-/**
* Return a hash based on the passed string
* @param str string to produce hash from
* @param method hashing method to use
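
For context, the removed declaration was used roughly as follows on the caller side. This is a simplified, hypothetical helper distilled from the mod_proxy_http.c code removed below, and it assumes pre-change headers in which the function still exists:

    #include "httpd.h"
    #include "util_filter.h"
    #include "apr_buckets.h"
    #include "mod_proxy.h"

    /* Hypothetical helper, not part of this patch: 'bb' holds buckets read
     * on the backend connection, 'pass_bb' was created from the client
     * connection's bucket allocator. */
    static apr_status_t forward_backend_brigade(request_rec *r,
                                                apr_bucket_brigade *bb,
                                                apr_bucket_brigade *pass_bb)
    {
        /* Re-create every bucket against the client connection's allocator,
         * then hand the copies to the client-side output filters. */
        apr_status_t rv = ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
        apr_brigade_cleanup(bb);         /* backend buckets no longer needed */
        if (rv == APR_SUCCESS) {
            rv = ap_pass_brigade(r->output_filters, pass_bb);
        }
        apr_brigade_cleanup(pass_bb);    /* the transient copies were consumed */
        return rv;
    }
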
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index e2204a6c93..72602b16b7 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -1394,7 +1394,6 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
request_rec *rp;
apr_bucket *e;
apr_bucket_brigade *bb, *tmp_bb;
- apr_bucket_brigade *pass_bb;
int len, backasswards;
int interim_response = 0; /* non-zero whilst interim 1xx responses
* are being read. */
@@ -1422,8 +1421,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
&& !(apr_table_get(r->subprocess_env, "force-proxy-request-1.0")));
bb = apr_brigade_create(p, c->bucket_alloc);
- pass_bb = apr_brigade_create(p, c->bucket_alloc);
-
+
/* Setup for 100-Continue timeout if appropriate */
if (do_100_continue) {
apr_socket_timeout_get(backend->sock, &old_timeout);
@@ -1900,16 +1898,23 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
break;
}
- /* Switch the allocator lifetime of the buckets */
- ap_proxy_buckets_lifetime_transform(r, bb, pass_bb);
- apr_brigade_cleanup(bb);
-
/* found the last brigade? */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(pass_bb))) {
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
/* signal that we must leave */
finish = TRUE;
+ /* the brigade may contain transient buckets that contain
+ * data that lives only as long as the backend connection.
+ * Force a setaside so these transient buckets become heap
+ * buckets that live as long as the request.
+ */
+ for (e = APR_BRIGADE_FIRST(bb);
+      e != APR_BRIGADE_SENTINEL(bb);
+      e = APR_BUCKET_NEXT(e)) {
+ apr_bucket_setaside(e, r->pool);
+ }
+
/* make sure we release the backend connection as soon
* as we know we are done, so that the backend isn't
* left waiting for a slow client to eventually
@@ -1921,7 +1926,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
}
/* try send what we read */
- if (ap_pass_brigade(r->output_filters, pass_bb) != APR_SUCCESS
+ if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS
|| c->aborted) {
/* Ack! Phbtt! Die! User aborted! */
backend->close = 1; /* this causes socket close below */
@@ -1929,7 +1934,7 @@ apr_status_t ap_proxy_http_process_response(apr_pool_t * p, request_rec *r,
}
/* make sure we always clean up after ourselves */
- apr_brigade_cleanup(pass_bb);
+ apr_brigade_cleanup(bb);
} while (!finish);
}
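
The apr_bucket_setaside() loop added above is what replaces the copy step: a transient bucket only borrows its data, and setting it aside promotes it to a heap bucket whose lifetime is bound to the given pool. A self-contained sketch of that behaviour outside httpd (plain APR/apr-util; the buffer and pool names are illustrative only):

    #include <string.h>
    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_buckets.h"

    int main(void)
    {
        apr_pool_t *pool;
        apr_bucket_alloc_t *ba;
        apr_bucket_brigade *bb;
        apr_bucket *e;
        char buf[32];                  /* stands in for the backend's buffer */

        apr_initialize();
        apr_pool_create(&pool, NULL);
        ba = apr_bucket_alloc_create(pool);
        bb = apr_brigade_create(pool, ba);

        strcpy(buf, "data from the backend");
        APR_BRIGADE_INSERT_TAIL(bb,
            apr_bucket_transient_create(buf, strlen(buf), ba));

        /* Before 'buf' goes away (the backend connection, in mod_proxy's
         * case), promote the transient buckets to heap buckets owned by
         * 'pool', exactly like the loop added in the hunk above. */
        for (e = APR_BRIGADE_FIRST(bb);
             e != APR_BRIGADE_SENTINEL(bb);
             e = APR_BUCKET_NEXT(e)) {
            apr_bucket_setaside(e, pool);
        }

        apr_terminate();
        return 0;
    }
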
diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
index 277c7d4155..0d6d16c826 100644
--- a/modules/proxy/proxy_util.c
+++ b/modules/proxy/proxy_util.c
@@ -2643,19 +2643,17 @@ PROXY_DECLARE(int) ap_proxy_connection_create(const char *proxy_function,
apr_sockaddr_t *backend_addr = conn->addr;
int rc;
apr_interval_time_t current_timeout;
- apr_bucket_alloc_t *bucket_alloc;
if (conn->connection) {
return OK;
}
- bucket_alloc = apr_bucket_alloc_create(conn->scpool);
/*
* The socket is now open, create a new backend server connection
*/
conn->connection = ap_run_create_connection(conn->scpool, s, conn->sock,
0, NULL,
- bucket_alloc);
+ c->bucket_alloc);
if (!conn->connection) {
/*
@@ -2743,56 +2741,6 @@ PROXY_DECLARE(void) ap_proxy_backend_broke(request_rec *r,
}
/*
- * Transform buckets from one bucket allocator to another one by creating a
- * transient bucket for each data bucket and let it use the data read from
- * the old bucket. Metabuckets are transformed by just recreating them.
- * Attention: Currently only the following bucket types are handled:
- *
- * All data buckets
- * FLUSH
- * EOS
- *
- * If an other bucket type is found its type is logged as a debug message
- * and APR_EGENERAL is returned.
- */
-PROXY_DECLARE(apr_status_t)
-ap_proxy_buckets_lifetime_transform(request_rec *r, apr_bucket_brigade *from,
- apr_bucket_brigade *to)
-{
- apr_bucket *e;
- apr_bucket *new;
- const char *data;
- apr_size_t bytes;
- apr_status_t rv = APR_SUCCESS;
-
- apr_brigade_cleanup(to);
- for (e = APR_BRIGADE_FIRST(from);
- e != APR_BRIGADE_SENTINEL(from);
- e = APR_BUCKET_NEXT(e)) {
- if (!APR_BUCKET_IS_METADATA(e)) {
- apr_bucket_read(e, &data, &bytes, APR_BLOCK_READ);
- new = apr_bucket_transient_create(data, bytes, r->connection->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(to, new);
- }
- else if (APR_BUCKET_IS_FLUSH(e)) {
- new = apr_bucket_flush_create(r->connection->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(to, new);
- }
- else if (APR_BUCKET_IS_EOS(e)) {
- new = apr_bucket_eos_create(r->connection->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(to, new);
- }
- else {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
- "proxy: Unhandled bucket type of type %s in"
- " ap_proxy_buckets_lifetime_transform", e->type->name);
- rv = APR_EGENERAL;
- }
- }
- return rv;
-}
-
-/*
* Provide a string hashing function for the proxy.
* We offer 2 methods: one is the APR model but we
* also provide our own, based on either FNV or SDBM.
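
The proxy_util.c change above is the other half of the picture: by passing c->bucket_alloc to ap_run_create_connection(), the backend connection now shares the client connection's bucket allocator, so buckets read from the backend can be moved to the client side directly instead of being re-created per allocator. A standalone sketch of that property (plain APR; the names are illustrative only):

    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_buckets.h"

    int main(void)
    {
        apr_pool_t *pool;
        apr_bucket_alloc_t *shared;        /* plays the role of c->bucket_alloc */
        apr_bucket_brigade *backend_bb, *client_bb;

        apr_initialize();
        apr_pool_create(&pool, NULL);

        /* One allocator, two brigades: the situation mod_proxy is now in. */
        shared = apr_bucket_alloc_create(pool);
        backend_bb = apr_brigade_create(pool, shared);
        client_bb = apr_brigade_create(pool, shared);

        APR_BRIGADE_INSERT_TAIL(backend_bb,
            apr_bucket_immortal_create("hello", 5, shared));
        APR_BRIGADE_INSERT_TAIL(backend_bb, apr_bucket_eos_create(shared));

        /* Because the allocator is shared, the buckets themselves can be
         * moved across without the transient-copy dance performed by the
         * removed ap_proxy_buckets_lifetime_transform(). */
        APR_BRIGADE_CONCAT(client_bb, backend_bb);

        apr_terminate();
        return 0;
    }
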