Release 2.19.7

- Handle ECN-CE event: issue a loss event.
- Log the fact that we ignore SETTINGS_MAX_HEADER_LIST_SIZE.
- Use Max Push ID in GOAWAY frame to cancel promises.
- Add support for HTTP/3 CANCEL_PUSH frame.
- lsquic_stream_is_pushed: streams without headers are never pushed.
- [BUGFIX] Regression in lsquic_stream_shutdown_internal: now it shuts down.
- Improve the logic for deciding whether to generate CONNECTION_CLOSE.
Dmitri Tikhonov 2020-08-26 09:00:45 -04:00
parent 36fcb9aa01
commit 03fef29b03
7 changed files with 176 additions and 40 deletions

View file

@ -1,3 +1,13 @@
2020-08-26
- 2.19.7
- Handle ECN-CE event: issue a loss event.
- Log the fact that we ignore SETTINGS_MAX_HEADER_LIST_SIZE.
- Use Max Push ID in GOAWAY frame to cancel promises.
- Add support for HTTP/3 CANCEL_PUSH frame.
- lsquic_stream_is_pushed: streams without headers are never pushed.
- [BUGFIX] Regression in lsquic_stream_shutdown_internal: now it shuts down.
- Improve the logic for deciding whether to generate CONNECTION_CLOSE.
2020-08-20
- 2.19.6
- Don't process incoming ECN marks if ECN is not enabled.

View file

@ -377,7 +377,10 @@ settings structure:
.. member:: int es_silent_close
SCLS (silent close)
When true, ``CONNECTION_CLOSE`` is not sent when the connection times out.
The server will also not send a reply to the client's ``CONNECTION_CLOSE``.
Corresponds to SCLS (silent close) gQUIC option.
.. member:: unsigned es_max_header_list_size
@ -868,7 +871,8 @@ out of date. Please check your :file:`lsquic.h` for actual values.*
.. macro:: LSQUIC_DF_SILENT_CLOSE
By default, connections are closed silently when they time out (no
``CONNECTION_CLOSE`` frame is sent) and the server does not reply with
its own ``CONNECTION_CLOSE`` after it receives one.
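A minimal sketch (not part of this commit, and assuming only the public API
declared in :file:`lsquic.h`) of how an application might override the
silent-close default through ``es_silent_close``:

    #include <string.h>
    #include "lsquic.h"

    static lsquic_engine_t *
    make_engine (void)
    {
        struct lsquic_engine_settings settings;
        struct lsquic_engine_api api;

        /* Start from library defaults, which include LSQUIC_DF_SILENT_CLOSE */
        lsquic_engine_init_settings(&settings, LSENG_SERVER|LSENG_HTTP);
        /* Turn silent close off: send CONNECTION_CLOSE on timeout and reply
         * to the client's CONNECTION_CLOSE.
         */
        settings.es_silent_close = 0;
        memset(&api, 0, sizeof(api));
        api.ea_settings = &settings;
        /* ... the other required ea_* members (ea_stream_if, ea_packets_out,
         * and so on) must be set before this call ...
         */
        return lsquic_engine_new(LSENG_SERVER|LSENG_HTTP, &api);
    }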
.. macro:: LSQUIC_DF_MAX_HEADER_LIST_SIZE

View file

@ -26,7 +26,7 @@ author = u'LiteSpeed Technologies'
# The short X.Y version
version = u'2.19'
# The full version, including alpha/beta/rc tags
release = u'2.19.6'
release = u'2.19.7'
# -- General configuration ---------------------------------------------------

View file

@ -25,7 +25,7 @@ extern "C" {
#define LSQUIC_MAJOR_VERSION 2
#define LSQUIC_MINOR_VERSION 19
#define LSQUIC_PATCH_VERSION 6
#define LSQUIC_PATCH_VERSION 7
/**
* Engine flags:
@ -446,7 +446,12 @@ struct lsquic_engine_settings {
/** ICSL in microseconds; GQUIC only */
unsigned long es_idle_conn_to;
/** SCLS (silent close) */
/**
* When true, CONNECTION_CLOSE is not sent when the connection times out.
* The server will also not send a reply to the client's CONNECTION_CLOSE.
*
* Corresponds to SCLS (silent close) gQUIC option.
*/
int es_silent_close;
/**

View file

@ -144,6 +144,7 @@ enum more_flags
MF_NOPROG_TIMEOUT = 1 << 1,
MF_CHECK_MTU_PROBE = 1 << 2,
MF_IGNORE_MISSING = 1 << 3,
MF_CONN_CLOSE_PACK = 1 << 4, /* CONNECTION_CLOSE has been packetized */
};
@ -393,8 +394,6 @@ struct ietf_full_conn
struct qpack_dec_hdl ifc_qdh;
struct {
uint64_t header_table_size,
num_placeholders,
max_header_list_size,
qpack_blocked_streams;
} ifc_peer_hq_settings;
struct dcid_elem *ifc_dces[MAX_IETF_CONN_DCIDS];
@ -1180,7 +1179,6 @@ ietf_full_conn_init (struct ietf_full_conn *conn,
conn->ifc_pub.u.ietf.qdh = &conn->ifc_qdh;
conn->ifc_peer_hq_settings.header_table_size = HQ_DF_QPACK_MAX_TABLE_CAPACITY;
conn->ifc_peer_hq_settings.max_header_list_size = HQ_DF_MAX_HEADER_LIST_SIZE;
conn->ifc_peer_hq_settings.qpack_blocked_streams = HQ_DF_QPACK_BLOCKED_STREAMS;
conn->ifc_flags = flags | IFC_CREATED_OK | IFC_FIRST_TICK;
@ -3749,7 +3747,8 @@ immediate_close (struct ietf_full_conn *conn)
*/
lsquic_send_ctl_drop_scheduled(&conn->ifc_send_ctl);
if (conn->ifc_flags & (IFC_TIMED_OUT|IFC_HSK_FAILED))
if ((conn->ifc_flags & (IFC_TIMED_OUT|IFC_HSK_FAILED))
&& conn->ifc_settings->es_silent_close)
return TICK_CLOSE;
packet_out = lsquic_send_ctl_new_packet_out(&conn->ifc_send_ctl, 0,
@ -3805,6 +3804,7 @@ immediate_close (struct ietf_full_conn *conn)
}
lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz);
packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE;
conn->ifc_mflags |= MF_CONN_CLOSE_PACK;
LSQ_DEBUG("generated CONNECTION_CLOSE frame in its own packet");
return TICK_SEND|TICK_CLOSE;
}
@ -3980,6 +3980,7 @@ generate_connection_close_packet (struct ietf_full_conn *conn)
}
lsquic_send_ctl_incr_pack_sz(&conn->ifc_send_ctl, packet_out, sz);
packet_out->po_frame_types |= 1 << QUIC_FRAME_CONNECTION_CLOSE;
conn->ifc_mflags |= MF_CONN_CLOSE_PACK;
LSQ_DEBUG("generated CONNECTION_CLOSE frame in its own packet");
conn->ifc_send_flags &= ~SF_SEND_CONN_CLOSE;
}
@ -4668,9 +4669,11 @@ new_stream (struct ietf_full_conn *conn, lsquic_stream_id_t stream_id,
{
iface = unicla_if_ptr;
stream_ctx = conn;
#if CLIENT_PUSH_SUPPORT
/* FIXME: This logic does not work for push streams. Perhaps one way
* to address this is to reclassify them later?
*/
#endif
flags |= SCF_CRITICAL;
}
else
@ -7011,6 +7014,7 @@ check_or_schedule_mtu_probe (struct ietf_full_conn *conn, lsquic_time_t now)
|| ds->ds_probe_sent + conn->ifc_enpub->enp_mtu_probe_timer < now);
if (!(conn->ifc_conn.cn_flags & LSCONN_HANDSHAKE_DONE)
|| (conn->ifc_flags & IFC_CLOSING)
|| lsquic_senhist_largest(&conn->ifc_send_ctl.sc_senhist) < 30
|| lsquic_send_ctl_in_recovery(&conn->ifc_send_ctl)
|| !lsquic_send_ctl_can_send_probe(&conn->ifc_send_ctl,
@ -7369,14 +7373,25 @@ ietf_full_conn_ci_tick (struct lsquic_conn *lconn, lsquic_time_t now)
{
LSQ_DEBUG("connection is OK to close");
conn->ifc_flags |= IFC_TICK_CLOSE;
if ((conn->ifc_send_flags & SF_SEND_CONN_CLOSE)
/* This is normal termination sequence for the server:
*
* Generate CONNECTION_CLOSE frame if we are responding to one
* or have packets scheduled to send
*/
if (!(conn->ifc_mflags & MF_CONN_CLOSE_PACK)
/* Generate CONNECTION_CLOSE frame if:
* ... this is a client and handshake was successful;
*/
&& (!(conn->ifc_flags & (IFC_SERVER|IFC_HSK_FAILED))
|| (conn->ifc_flags & (IFC_RECV_CLOSE|IFC_GOAWAY_CLOSE))
/* or: sent a GOAWAY frame;
*/
|| (conn->ifc_flags & IFC_GOAWAY_CLOSE)
/* or: we received CONNECTION_CLOSE and we are not a server
* that chooses not to send CONNECTION_CLOSE responses.
* From [draft-ietf-quic-transport-29]:
" An endpoint that receives a CONNECTION_CLOSE frame MAY send
" a single packet containing a CONNECTION_CLOSE frame before
" entering the draining state
*/
|| ((conn->ifc_flags & IFC_RECV_CLOSE)
&& !((conn->ifc_flags & IFC_SERVER)
&& conn->ifc_settings->es_silent_close))
/* or: we have packets to send. */
|| 0 != lsquic_send_ctl_n_scheduled(&conn->ifc_send_ctl))
)
{
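A condensed restatement of the new condition above, as a sketch using
hypothetical booleans (is_client, handshake_ok, and so on) in place of the
actual ifc_flags and ifc_mflags bits:

    /* Generate CONNECTION_CLOSE only if none has been packetized yet
     * (MF_CONN_CLOSE_PACK is clear) and at least one of these holds:
     */
    int should_send_conn_close =
           !conn_close_already_packetized
        && (   (is_client && handshake_ok)       /* client, handshake done   */
            || sent_goaway                       /* we sent a GOAWAY frame   */
            || (received_conn_close              /* peer closed first...     */
                && !(is_server && silent_close)) /* ...and we may reply      */
            || have_scheduled_packets);          /* packets waiting to send  */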
@ -7829,11 +7844,77 @@ static const struct conn_iface *ietf_full_conn_prehsk_iface_ptr =
static void
on_cancel_push (void *ctx, uint64_t push_id)
on_cancel_push_client (void *ctx, uint64_t push_id)
{
struct ietf_full_conn *const conn = ctx;
LSQ_DEBUG("TODO %s: %"PRIu64, __func__, push_id);
/* TODO */
EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "Received CANCEL_PUSH(%"PRIu64")",
push_id);
if (!(conn->ifc_u.cli.ifcli_flags & IFCLI_PUSH_ENABLED))
{
ABORT_QUIETLY(1, HEC_ID_ERROR, "received CANCEL_PUSH but push is "
"not enabled");
return;
}
if (push_id > conn->ifc_u.cli.ifcli_max_push_id)
{
ABORT_QUIETLY(1, HEC_ID_ERROR, "received CANCEL_PUSH with ID=%"PRIu64
", which is greater than the maximum Push ID=%"PRIu64, push_id,
conn->ifc_u.cli.ifcli_max_push_id);
return;
}
#if CLIENT_PUSH_SUPPORT
LSQ_WARN("TODO: support for CANCEL_PUSH is not implemented");
#endif
}
/* Careful: this puts promise */
static void
cancel_push_promise (struct ietf_full_conn *conn, struct push_promise *promise)
{
LSQ_DEBUG("cancel promise %"PRIu64, promise->pp_id);
/* Remove promise from hash to prevent multiple cancellations */
lsquic_hash_erase(conn->ifc_pub.u.ietf.promises, &promise->pp_hash_id);
lsquic_pp_put(promise, conn->ifc_pub.u.ietf.promises);
/* But let stream dtor free the promise object as sm_promise may yet
* be used by the stream in some ways.
*/
lsquic_stream_shutdown_internal(promise->pp_pushed_stream);
if (0 != lsquic_hcso_write_cancel_push(&conn->ifc_hcso, promise->pp_id))
ABORT_WARN("cannot write CANCEL_PUSH");
}
static void
on_cancel_push_server (void *ctx, uint64_t push_id)
{
struct ietf_full_conn *const conn = ctx;
struct lsquic_hash_elem *el;
struct push_promise *promise;
EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "Received CANCEL_PUSH(%"PRIu64")",
push_id);
if (push_id >= conn->ifc_u.ser.ifser_next_push_id)
{
ABORT_QUIETLY(1, HEC_ID_ERROR, "received CANCEL_PUSH with ID=%"PRIu64
", which is greater than the maximum Push ID ever generated by "
"this connection", push_id);
return;
}
el = lsquic_hash_find(conn->ifc_pub.u.ietf.promises, &push_id,
sizeof(push_id));
if (!el)
{
LSQ_DEBUG("push promise %"PRIu64" not found", push_id);
return;
}
promise = lsquic_hashelem_getdata(el);
cancel_push_promise(conn, promise);
}
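For reference, the frame that lsquic_hcso_write_cancel_push() emits above is
tiny. A self-contained sketch of the wire layout (CANCEL_PUSH frame,
draft-ietf-quic-http-29), assuming a hypothetical Push ID small enough that
each varint fits in a single byte:

    #include <stdint.h>

    uint64_t push_id = 7;            /* hypothetical Push ID (< 64) */
    unsigned char frame[3] = {
        0x03,                        /* frame type: CANCEL_PUSH            */
        0x01,                        /* payload length: 1 byte             */
        (unsigned char) push_id,     /* Push ID as a 1-byte varint (2 MSB 00) */
    };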
@ -7918,9 +7999,8 @@ on_setting (void *ctx, uint64_t setting_id, uint64_t value)
conn->ifc_peer_hq_settings.header_table_size = value;
break;
case HQSID_MAX_HEADER_LIST_SIZE:
LSQ_DEBUG("Peer's SETTINGS_MAX_HEADER_LIST_SIZE=%"PRIu64, value);
conn->ifc_peer_hq_settings.max_header_list_size = value;
/* TODO: apply it */
LSQ_DEBUG("Peer's SETTINGS_MAX_HEADER_LIST_SIZE=%"PRIu64"; "
"we ignore it", value);
break;
default:
LSQ_DEBUG("received unknown SETTING 0x%"PRIX64"=0x%"PRIX64
@ -8037,7 +8117,19 @@ on_goaway_client (void *ctx, uint64_t stream_id)
static void
on_goaway_server (void *ctx, uint64_t max_push_id)
{
/* TODO: cancel pushes? */
struct ietf_full_conn *const conn = ctx;
struct push_promise *promise;
struct lsquic_hash_elem *el;
EV_LOG_CONN_EVENT(LSQUIC_LOG_CONN_ID, "Received GOAWAY(%"PRIu64")",
max_push_id);
for (el = lsquic_hash_first(conn->ifc_pub.u.ietf.promises); el;
el = lsquic_hash_next(conn->ifc_pub.u.ietf.promises))
{
promise = lsquic_hashelem_getdata(el);
if (promise->pp_id >= max_push_id)
cancel_push_promise(conn, promise);
}
}
@ -8052,7 +8144,7 @@ on_unexpected_frame (void *ctx, uint64_t frame_type)
static const struct hcsi_callbacks hcsi_callbacks_server_27 =
{
.on_cancel_push = on_cancel_push,
.on_cancel_push = on_cancel_push_server,
.on_max_push_id = on_max_push_id,
.on_settings_frame = on_settings_frame,
.on_setting = on_setting,
@ -8062,7 +8154,7 @@ static const struct hcsi_callbacks hcsi_callbacks_server_27 =
static const struct hcsi_callbacks hcsi_callbacks_client_27 =
{
.on_cancel_push = on_cancel_push,
.on_cancel_push = on_cancel_push_client,
.on_max_push_id = on_max_push_id_client,
.on_settings_frame = on_settings_frame,
.on_setting = on_setting,
@ -8073,7 +8165,7 @@ static const struct hcsi_callbacks hcsi_callbacks_client_27 =
static const struct hcsi_callbacks hcsi_callbacks_server_28 =
{
.on_cancel_push = on_cancel_push,
.on_cancel_push = on_cancel_push_server,
.on_max_push_id = on_max_push_id,
.on_settings_frame = on_settings_frame,
.on_setting = on_setting,
@ -8083,7 +8175,7 @@ static const struct hcsi_callbacks hcsi_callbacks_server_28 =
static const struct hcsi_callbacks hcsi_callbacks_client_28 =
{
.on_cancel_push = on_cancel_push,
.on_cancel_push = on_cancel_push_client,
.on_max_push_id = on_max_push_id_client,
.on_settings_frame = on_settings_frame,
.on_setting = on_setting,
@ -8094,7 +8186,7 @@ static const struct hcsi_callbacks hcsi_callbacks_client_28 =
static const struct hcsi_callbacks hcsi_callbacks_server_29 =
{
.on_cancel_push = on_cancel_push,
.on_cancel_push = on_cancel_push_server,
.on_max_push_id = on_max_push_id,
.on_settings_frame = on_settings_frame,
.on_setting = on_setting,
@ -8104,7 +8196,7 @@ static const struct hcsi_callbacks hcsi_callbacks_server_29 =
static const struct hcsi_callbacks hcsi_callbacks_client_29 =
{
.on_cancel_push = on_cancel_push,
.on_cancel_push = on_cancel_push_client,
.on_max_push_id = on_max_push_id_client,
.on_settings_frame = on_settings_frame,
.on_setting = on_setting,

View file

@ -108,7 +108,7 @@ send_ctl_expire (struct lsquic_send_ctl *, enum packnum_space,
static void
set_retx_alarm (struct lsquic_send_ctl *, enum packnum_space, lsquic_time_t);
static void
static int
send_ctl_detect_losses (struct lsquic_send_ctl *, enum packnum_space,
lsquic_time_t time);
@ -938,6 +938,18 @@ largest_retx_packet_number (const struct lsquic_send_ctl *ctl,
static void
send_ctl_loss_event (struct lsquic_send_ctl *ctl)
{
ctl->sc_ci->cci_loss(CGP(ctl));
if (ctl->sc_flags & SC_PACE)
lsquic_pacer_loss_event(&ctl->sc_pacer);
ctl->sc_largest_sent_at_cutback =
lsquic_senhist_largest(&ctl->sc_senhist);
}
/* Return true if losses were detected, false otherwise */
static int
send_ctl_detect_losses (struct lsquic_send_ctl *ctl, enum packnum_space pns,
lsquic_time_t time)
{
@ -1002,11 +1014,7 @@ send_ctl_detect_losses (struct lsquic_send_ctl *ctl, enum packnum_space pns,
{
LSQ_DEBUG("detected new loss: packet %"PRIu64"; new lsac: "
"%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
ctl->sc_ci->cci_loss(CGP(ctl));
if (ctl->sc_flags & SC_PACE)
lsquic_pacer_loss_event(&ctl->sc_pacer);
ctl->sc_largest_sent_at_cutback =
lsquic_senhist_largest(&ctl->sc_senhist);
send_ctl_loss_event(ctl);
}
else if (largest_lost_packno)
/* Lost packets whose numbers are smaller than the largest packet
@ -1015,6 +1023,8 @@ send_ctl_detect_losses (struct lsquic_send_ctl *ctl, enum packnum_space pns,
*/
LSQ_DEBUG("ignore loss of packet %"PRIu64" smaller than lsac "
"%"PRIu64, largest_lost_packno, ctl->sc_largest_sent_at_cutback);
return largest_lost_packno > ctl->sc_largest_sent_at_cutback;
}
@ -1043,7 +1053,7 @@ lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
lsquic_packno_t smallest_unacked;
lsquic_packno_t ack2ed[2];
unsigned packet_sz;
int app_limited;
int app_limited, losses_detected;
signed char do_rtt, skip_checks;
enum packnum_space pns;
unsigned ecn_total_acked, ecn_ce_cnt, one_rtt_cnt;
@ -1188,7 +1198,7 @@ lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
}
detect_losses:
send_ctl_detect_losses(ctl, pns, ack_recv_time);
losses_detected = send_ctl_detect_losses(ctl, pns, ack_recv_time);
if (send_ctl_first_unacked_retx_packet(ctl, pns))
set_retx_alarm(ctl, pns, now);
else
@ -1221,7 +1231,18 @@ lsquic_send_ctl_got_ack (lsquic_send_ctl_t *ctl,
if (acki->ecn_counts[ECN_CE] > ctl->sc_ecn_ce_cnt[pns])
{
ctl->sc_ecn_ce_cnt[pns] = acki->ecn_counts[ECN_CE];
LSQ_WARN("TODO: handle ECN CE event"); /* XXX TODO */
if (losses_detected)
/* It's either-or. From [draft-ietf-quic-recovery-29],
* Section 7.4:
" When a loss or ECN-CE marking is detected [...]
*/
LSQ_DEBUG("ECN-CE marking detected, but loss event already "
"accounted for");
else
{
LSQ_DEBUG("ECN-CE marking detected, issue loss event");
send_ctl_loss_event(ctl);
}
}
}
else
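The rule applied in the hunk above (draft-ietf-quic-recovery-29, Section 7.4,
quoted in the comment) is that detected loss and an ECN-CE count increase feed
the same congestion response. A sketch of the combined effect, with
hypothetical booleans standing in for the state computed during ACK processing:

    /* At most one congestion event per ACK: either detected loss already
     * triggered send_ctl_loss_event(), or an ECN-CE increase does so now.
     */
    if (ecn_ce_count_increased && !losses_detected)
        send_ctl_loss_event(ctl);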

View file

@ -1704,6 +1704,7 @@ void
lsquic_stream_shutdown_internal (lsquic_stream_t *stream)
{
LSQ_DEBUG("internal shutdown");
stream->stream_flags |= STREAM_U_READ_DONE|STREAM_U_WRITE_DONE;
if (lsquic_stream_is_critical(stream))
{
LSQ_DEBUG("add flag to force-finish special stream");
@ -3825,13 +3826,16 @@ lsquic_stream_is_pushed (const lsquic_stream_t *stream)
{
enum stream_id_type sit;
if (stream->sm_bflags & SMBF_IETF)
switch (stream->sm_bflags & (SMBF_IETF|SMBF_USE_HEADERS))
{
case SMBF_IETF|SMBF_USE_HEADERS:
sit = stream->id & SIT_MASK;
return sit == SIT_UNI_SERVER;
}
else
case SMBF_USE_HEADERS:
return 1 & ~stream->id;
default:
return 0;
}
}
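The IETF branch only reports a stream as pushed when headers are in use (HTTP/3
pushes arrive on server-initiated unidirectional streams), matching the
changelog note that streams without headers are never pushed. The gQUIC branch
relies on stream-ID parity; a small illustrative check (not from the lsquic
sources), assuming the usual Google QUIC convention that client-initiated
streams have odd IDs and server-initiated (pushed) streams have even IDs:

    #include <assert.h>
    #include <stdint.h>

    int main (void)
    {
        uint64_t pushed_id = 4;   /* hypothetical server-initiated (even) gQUIC ID */
        uint64_t client_id = 5;   /* hypothetical client-initiated (odd) gQUIC ID  */
        assert((1 & ~pushed_id) == 1);   /* even ID => reported as pushed     */
        assert((1 & ~client_id) == 0);   /* odd ID  => reported as not pushed */
        return 0;
    }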