
Make the state-machine nature of the EBML parser more evident.

Joseph Wallace 2015-11-21 03:12:33 -05:00
parent 15e7fc6e4a
commit 744b66c40e
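
The diff below replaces the boolean header_read flag with an explicit ebml_read_mode enum and switch-based dispatch. As a rough, standalone illustration of that pattern (a minimal sketch reusing the state names from the diff, not the actual Icecast parser):

    /* Minimal sketch of the two-state reader pattern introduced by this commit.
     * Standalone illustration only; the real parser is in the diff below. */
    #include <stdio.h>

    typedef enum ebml_read_mode {
        EBML_STATE_READING_HEADER = 0,
        EBML_STATE_READING_CLUSTERS
    } ebml_read_mode;

    int main(void)
    {
        ebml_read_mode state = EBML_STATE_READING_HEADER;

        for (int i = 0; i < 4; i++) {
            switch (state) {
                case EBML_STATE_READING_HEADER:
                    puts("emit the buffered EBML header as one chunk");
                    state = EBML_STATE_READING_CLUSTERS;  /* one-way transition */
                    break;
                case EBML_STATE_READING_CLUSTERS:
                    puts("emit cluster data, stopping before the next cluster tag");
                    break;
            }
        }
        return 0;
    }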


@@ -39,6 +39,10 @@
 #define EBML_HEADER_MAX_SIZE 131072
 #define EBML_SLICE_SIZE 4096
 
+typedef enum ebml_read_mode {
+    EBML_STATE_READING_HEADER = 0,
+    EBML_STATE_READING_CLUSTERS
+} ebml_read_mode;
+
 typedef struct ebml_client_data_st ebml_client_data_t;
@@ -51,6 +55,8 @@ struct ebml_client_data_st {
 struct ebml_st {
+    ebml_read_mode output_state;
+
     char *cluster_id;
     int cluster_start;
@@ -58,7 +64,6 @@ struct ebml_st {
     unsigned char *input_buffer;
     unsigned char *buffer;
-    int header_read;
     int header_size;
     int header_position;
     int header_read_position;
@@ -302,6 +307,8 @@ static ebml_t *ebml_create()
     ebml_t *ebml = calloc(1, sizeof(ebml_t));
 
+    ebml->output_state = EBML_STATE_READING_HEADER;
+
     ebml->header = calloc(1, EBML_HEADER_MAX_SIZE);
     ebml->buffer = calloc(1, EBML_SLICE_SIZE * 4);
     ebml->input_buffer = calloc(1, EBML_SLICE_SIZE);
@@ -322,35 +329,39 @@ static int ebml_read_space(ebml_t *ebml)
     int read_space;
 
-    if (ebml->header_read == 1)
-    {
-        /* The header has previously been read */
-        if (ebml->cluster_start > 0) {
-            /* return up until just before a new cluster starts */
-            read_space = ebml->cluster_start;
-        } else {
-            /* return most of what we have, but leave enough unread
-             * to detect the next cluster.
-             */
-            read_space = ebml->position - 4;
-        }
-        return read_space;
-    }
-    else
-    {
-        if (ebml->header_size != 0) {
-            /* The header can be read */
-            return ebml->header_size;
-        } else {
-            /* The header's not ready yet */
-            return 0;
-        }
-    }
+    switch (ebml->output_state) {
+        case EBML_STATE_READING_HEADER:
+
+            if (ebml->header_size != 0) {
+                /* The header can be read */
+                return ebml->header_size;
+            } else {
+                /* The header's not ready yet */
+                return 0;
+            }
+            break;
+
+        case EBML_STATE_READING_CLUSTERS:
+
+            if (ebml->cluster_start > 0) {
+                /* return up until just before a new cluster starts */
+                read_space = ebml->cluster_start;
+            } else {
+                /* return most of what we have, but leave enough unread
+                 * to detect the next cluster.
+                 */
+                read_space = ebml->position - 4;
+            }
+            return read_space;
+    }
+
+    ICECAST_LOG_ERROR("EBML: Invalid parser read state");
+    return 0;
 }
 
 /* Return a chunk of the EBML/MKV/WebM stream.
+ * The header will be buffered until it can be returned as one chunk.
  * A cluster element's opening tag will always start a new chunk.
  */
 static int ebml_read(ebml_t *ebml, char *buffer, int len)
@@ -363,61 +374,63 @@ static int ebml_read(ebml_t *ebml, char *buffer, int len)
         return 0;
     }
 
-    if (ebml->header_read == 1)
-    {
-        /* The header has previously been read */
-        if (ebml->cluster_start > 0) {
-            /* return up until just before a new cluster starts */
-            read_space = ebml->cluster_start;
-        } else {
-            read_space = ebml->position - 4;
-        }
-
-        if (read_space < 1) {
-            return 0;
-        }
-
-        if (read_space >= len ) {
-            to_read = len;
-        } else {
-            to_read = read_space;
-        }
-
-        memcpy(buffer, ebml->buffer, to_read);
-
-        /* Shift unread data down to the start of the buffer */
-        memmove(ebml->buffer, ebml->buffer + to_read, ebml->position - to_read);
-        ebml->position -= to_read;
-
-        if (ebml->cluster_start > 0) {
-            ebml->cluster_start -= to_read;
-        }
-    }
-    else
-    {
-        if (ebml->header_size != 0)
-        {
-            /* Can read a chunk of the header */
-            read_space = ebml->header_size - ebml->header_read_position;
-
-            if (read_space >= len) {
-                to_read = len;
-            } else {
-                to_read = read_space;
-            }
-
-            memcpy(buffer, ebml->header, to_read);
-            ebml->header_read_position += to_read;
-
-            if (ebml->header_read_position == ebml->header_size) {
-                ebml->header_read = 1;
-            }
-        }
-        else
-        {
-            /* The header's not ready yet */
-            return 0;
-        }
-    }
+    switch (ebml->output_state) {
+        case EBML_STATE_READING_HEADER:
+
+            if (ebml->header_size != 0)
+            {
+                /* Can read a chunk of the header */
+                read_space = ebml->header_size - ebml->header_read_position;
+
+                if (read_space >= len) {
+                    to_read = len;
+                } else {
+                    to_read = read_space;
+                }
+
+                memcpy(buffer, ebml->header, to_read);
+                ebml->header_read_position += to_read;
+
+                if (ebml->header_read_position == ebml->header_size) {
+                    ebml->output_state = EBML_STATE_READING_CLUSTERS;
+                }
+            } else {
+                /* The header's not ready yet */
+                return 0;
+            }
+            break;
+
+        case EBML_STATE_READING_CLUSTERS:
+
+            if (ebml->cluster_start > 0) {
+                /* return up until just before a new cluster starts */
+                read_space = ebml->cluster_start;
+            } else {
+                read_space = ebml->position - 4;
+            }
+
+            if (read_space < 1) {
+                return 0;
+            }
+
+            if (read_space >= len ) {
+                to_read = len;
+            } else {
+                to_read = read_space;
+            }
+
+            memcpy(buffer, ebml->buffer, to_read);
+
+            /* Shift unread data down to the start of the buffer */
+            memmove(ebml->buffer, ebml->buffer + to_read, ebml->position - to_read);
+            ebml->position -= to_read;
+
+            if (ebml->cluster_start > 0) {
+                ebml->cluster_start -= to_read;
+            }
+            break;
+    }
 
     return to_read;
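
For orientation, a hypothetical caller could drive the two functions above as sketched here. Only the ebml_read_space()/ebml_read() signatures come from the diff; the ebml_t forward declaration and the drain() helper are assumptions added for illustration.

    /* Hypothetical caller sketch, not Icecast code: ebml_t and the two
     * prototypes are assumed from the diff above; drain() is an invented helper. */
    typedef struct ebml_st ebml_t;

    int ebml_read_space(ebml_t *ebml);
    int ebml_read(ebml_t *ebml, char *buffer, int len);

    /* Copy whatever the parser is currently willing to hand out into `out`.
     * Returns 0 while the header is still buffering or nothing is available. */
    static int drain(ebml_t *ebml, char *out, int out_len)
    {
        int avail = ebml_read_space(ebml);

        if (avail <= 0)
            return 0;
        if (avail > out_len)
            avail = out_len;

        /* A cluster's opening tag always starts a new chunk, per the
         * comment above ebml_read(). */
        return ebml_read(ebml, out, avail);
    }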