ref: 277338f7488d40d27a9ea8d3a5b2bf1a0f32aa95
parent: 5e7242dfda069046dced13cac8170bfe0fdb5f05
author: Minghai Shang <[email protected]>
date: Tue Jun 24 10:01:17 EDT 2014
[spatial svc] Implement lag in frames for spatial svc

Change-Id: I930dced169c9d53f8044d2754a04332138347409
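
With lag_in_frames enabled the encoder may buffer several input frames
before emitting any output, so vpx_svc_get_frame_size()/vpx_svc_get_buffer()
now drain a queue of encoded frames instead of returning a single buffer.
A rough sketch of the intended calling pattern, modeled on the updated
vp9_spatial_svc_encoder.c below (writer/pts handling is illustrative only):

    int frame_size;

    // After each vpx_svc_encode() call, zero or more encoded frames may be
    // ready; drain them all before feeding the next input frame.
    while ((frame_size = vpx_svc_get_frame_size(&svc_ctx)) > 0) {
      vpx_video_writer_write_frame(writer, vpx_svc_get_buffer(&svc_ctx),
                                   frame_size, pts);
    }

    // At end of stream, flush by calling vpx_svc_encode() with a NULL image
    // and drain the remaining lagged frames the same way.
    vpx_svc_encode(&svc_ctx, &codec, NULL, 0, frame_duration,
                   VPX_DL_GOOD_QUALITY);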
--- a/examples/vp9_spatial_svc_encoder.c
+++ b/examples/vp9_spatial_svc_encoder.c
@@ -296,6 +296,7 @@
int frame_duration = 1; /* 1 timebase tick per frame */
FILE *infile = NULL;
int end_of_stream = 0;
+ int frame_size;
memset(&svc_ctx, 0, sizeof(svc_ctx));
svc_ctx.log_print = 1;
@@ -351,11 +352,10 @@
die_codec(&codec, "Failed to encode frame");
}
if (!(app_input.passes == 2 && app_input.pass == 1)) {
- if (vpx_svc_get_frame_size(&svc_ctx) > 0) {
+ while ((frame_size = vpx_svc_get_frame_size(&svc_ctx)) > 0) {
vpx_video_writer_write_frame(writer,
vpx_svc_get_buffer(&svc_ctx),
- vpx_svc_get_frame_size(&svc_ctx),
- pts);
+ frame_size, pts);
}
}
if (vpx_svc_get_rc_stats_buffer_size(&svc_ctx) > 0) {
--- a/test/svc_test.cc
+++ b/test/svc_test.cc
@@ -265,9 +265,17 @@
video.duration(), VPX_DL_GOOD_QUALITY);
EXPECT_EQ(VPX_CODEC_OK, res);
+ if (vpx_svc_get_frame_size(&svc_) == 0) {
+ // Flush encoder
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+ }
+
+ int frame_size = vpx_svc_get_frame_size(&svc_);
+ EXPECT_GT(frame_size, 0);
const vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
// this test fails with a decoder error
ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
@@ -277,6 +285,9 @@
svc_.spatial_layers = 2;
vpx_svc_set_scale_factors(&svc_, "4/16,16/16");
vpx_svc_set_quantizers(&svc_, "40,30", 0);
+ int decoded_frames = 0;
+ vpx_codec_err_t res_dec;
+ int frame_size;
vpx_codec_err_t res =
vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_);
@@ -291,13 +302,14 @@
// This frame is a keyframe.
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
- ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 1
video.Next();
@@ -305,12 +317,14 @@
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 2
video.Next();
@@ -318,12 +332,29 @@
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ // Flush encoder
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+
+ while ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ EXPECT_EQ(3, decoded_frames);
}
TEST_F(SvcTest, GetLayerResolution) {
@@ -413,6 +444,9 @@
vpx_codec_destroy(&codec_);
// Second pass encode
+ int decoded_frames = 0;
+ vpx_codec_err_t res_dec;
+ int frame_size;
codec_enc_.g_pass = VPX_RC_LAST_PASS;
codec_enc_.rc_twopass_stats_in.buf = &stats_buf[0];
codec_enc_.rc_twopass_stats_in.sz = stats_buf.size();
@@ -427,12 +461,14 @@
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(1, vpx_svc_is_keyframe(&svc_));
- vpx_codec_err_t res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 1
video.Next();
@@ -440,12 +476,14 @@
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
// FRAME 2
video.Next();
@@ -453,12 +491,29 @@
res = vpx_svc_encode(&svc_, &codec_, video.img(), video.pts(),
video.duration(), VPX_DL_GOOD_QUALITY);
ASSERT_EQ(VPX_CODEC_OK, res);
- EXPECT_EQ(0, vpx_svc_is_keyframe(&svc_));
- res_dec = decoder_->DecodeFrame(
- static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)),
- vpx_svc_get_frame_size(&svc_));
- ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ if ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ // Flush encoder
+ res = vpx_svc_encode(&svc_, &codec_, NULL, 0,
+ video.duration(), VPX_DL_GOOD_QUALITY);
+ EXPECT_EQ(VPX_CODEC_OK, res);
+
+ while ((frame_size = vpx_svc_get_frame_size(&svc_)) > 0) {
+ EXPECT_EQ((decoded_frames == 0), vpx_svc_is_keyframe(&svc_));
+ res_dec = decoder_->DecodeFrame(
+ static_cast<const uint8_t *>(vpx_svc_get_buffer(&svc_)), frame_size);
+ ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError();
+ ++decoded_frames;
+ }
+
+ EXPECT_EQ(3, decoded_frames);
}
} // namespace
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -106,7 +106,7 @@
}
}
-static void set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
+void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv) {
MACROBLOCK *const mb = &cpi->mb;
cpi->common.allow_high_precision_mv = allow_high_precision_mv;
if (cpi->common.allow_high_precision_mv) {
@@ -572,7 +572,7 @@
cm->reset_frame_context = 0;
vp9_reset_segment_features(&cm->seg);
- set_high_precision_mv(cpi, 0);
+ vp9_set_high_precision_mv(cpi, 0);
{
int i;
@@ -2117,7 +2117,7 @@
if (!frame_is_intra_only(cm)) {
cm->interp_filter = DEFAULT_INTERP_FILTER;
/* TODO: Decide this more intelligently */
- set_high_precision_mv(cpi, q < HIGH_PRECISION_MV_QTHRESH);
+ vp9_set_high_precision_mv(cpi, q < HIGH_PRECISION_MV_QTHRESH);
}
if (cpi->sf.recode_loop == DISALLOW_RECODE) {
@@ -2298,12 +2298,22 @@
int res = 0;
const int subsampling_x = sd->uv_width < sd->y_width;
const int subsampling_y = sd->uv_height < sd->y_height;
+ const int is_spatial_svc = cpi->use_svc &&
+ (cpi->svc.number_temporal_layers == 1);
check_initial_width(cpi, subsampling_x, subsampling_y);
vpx_usec_timer_start(&timer);
- if (vp9_lookahead_push(cpi->lookahead,
- sd, time_stamp, end_time, frame_flags))
+
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ res = vp9_svc_lookahead_push(cpi, cpi->lookahead, sd, time_stamp, end_time,
+ frame_flags);
+ else
+#endif
+ res = vp9_lookahead_push(cpi->lookahead,
+ sd, time_stamp, end_time, frame_flags);
+ if (res)
res = -1;
vpx_usec_timer_mark(&timer);
cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
@@ -2419,11 +2429,14 @@
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
MV_REFERENCE_FRAME ref_frame;
int arf_src_index;
+ const int is_spatial_svc = cpi->use_svc &&
+ (cpi->svc.number_temporal_layers == 1);
if (!cpi)
return -1;
- if (cpi->svc.number_spatial_layers > 1 && cpi->pass == 2) {
+ if (is_spatial_svc && cpi->pass == 2) {
+ vp9_svc_lookahead_peek(cpi, cpi->lookahead, 0, 1);
vp9_restore_layer_context(cpi);
}
@@ -2432,7 +2445,7 @@
cpi->source = NULL;
cpi->last_source = NULL;
- set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+ vp9_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
// Normal defaults
cm->reset_frame_context = 0;
@@ -2446,7 +2459,14 @@
if (arf_src_index) {
assert(arf_src_index <= rc->frames_to_key);
- if ((cpi->source = vp9_lookahead_peek(cpi->lookahead, arf_src_index))) {
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ cpi->source = vp9_svc_lookahead_peek(cpi, cpi->lookahead,
+ arf_src_index, 1);
+ else
+#endif
+ cpi->source = vp9_lookahead_peek(cpi->lookahead, arf_src_index);
+ if (cpi->source != NULL) {
cpi->alt_ref_source = cpi->source;
if (cpi->oxcf.arnr_max_frames > 0) {
@@ -2472,12 +2492,24 @@
if (!cpi->source) {
// Get last frame source.
if (cm->current_video_frame > 0) {
- if ((cpi->last_source = vp9_lookahead_peek(cpi->lookahead, -1)) == NULL)
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ cpi->last_source = vp9_svc_lookahead_peek(cpi, cpi->lookahead, -1, 0);
+ else
+#endif
+ cpi->last_source = vp9_lookahead_peek(cpi->lookahead, -1);
+ if (cpi->last_source == NULL)
return -1;
}
// Read in the source frame.
- if ((cpi->source = vp9_lookahead_pop(cpi->lookahead, flush))) {
+#ifdef CONFIG_SPATIAL_SVC
+ if (is_spatial_svc)
+ cpi->source = vp9_svc_lookahead_pop(cpi, cpi->lookahead, flush);
+ else
+#endif
+ cpi->source = vp9_lookahead_pop(cpi->lookahead, flush);
+ if (cpi->source != NULL) {
cm->show_frame = 1;
cm->intra_only = 0;
@@ -2499,7 +2531,9 @@
*time_stamp = cpi->source->ts_start;
*time_end = cpi->source->ts_end;
- *frame_flags = cpi->source->flags;
+ *frame_flags =
+ (cpi->source->flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
+
} else {
*size = 0;
if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) {
@@ -2829,4 +2863,43 @@
int vp9_get_quantizer(VP9_COMP *cpi) {
return cpi->common.base_qindex;
+}
+
+void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags) {
+ if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_REF_ARF)) {
+ int ref = 7;
+
+ if (flags & VP8_EFLAG_NO_REF_LAST)
+ ref ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_GF)
+ ref ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_REF_ARF)
+ ref ^= VP9_ALT_FLAG;
+
+ vp9_use_as_reference(cpi, ref);
+ }
+
+ if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
+ VP8_EFLAG_FORCE_ARF)) {
+ int upd = 7;
+
+ if (flags & VP8_EFLAG_NO_UPD_LAST)
+ upd ^= VP9_LAST_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_GF)
+ upd ^= VP9_GOLD_FLAG;
+
+ if (flags & VP8_EFLAG_NO_UPD_ARF)
+ upd ^= VP9_ALT_FLAG;
+
+ vp9_update_reference(cpi, upd);
+ }
+
+ if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
+ vp9_update_entropy(cpi, 0);
+ }
}
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -517,9 +517,13 @@
int64_t vp9_rescale(int64_t val, int64_t num, int denom);
+void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv);
+
YV12_BUFFER_CONFIG *vp9_scale_if_required(VP9_COMMON *cm,
YV12_BUFFER_CONFIG *unscaled,
YV12_BUFFER_CONFIG *scaled);
+
+void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags);
static INLINE void set_ref_ptrs(VP9_COMMON *cm, MACROBLOCKD *xd,
MV_REFERENCE_FRAME ref0,
--- a/vp9/encoder/vp9_lookahead.c
+++ b/vp9/encoder/vp9_lookahead.c
@@ -18,18 +18,6 @@
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_lookahead.h"
-// The max of past frames we want to keep in the queue.
-#define MAX_PRE_FRAMES 1
-
-struct lookahead_ctx {
- unsigned int max_sz; /* Absolute size of the queue */
- unsigned int sz; /* Number of buffers currently in the queue */
- unsigned int read_idx; /* Read index */
- unsigned int write_idx; /* Write index */
- struct lookahead_entry *buf; /* Buffer list */
-};
-
-
/* Return the buffer at the given absolute index and increment the index */
static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
unsigned int *idx) {
--- a/vp9/encoder/vp9_lookahead.h
+++ b/vp9/encoder/vp9_lookahead.h
@@ -14,6 +14,11 @@
#include "vpx_scale/yv12config.h"
#include "vpx/vpx_integer.h"
+#ifdef CONFIG_SPATIAL_SVC
+#include "vpx/vp8cx.h"
+#include "vpx/vpx_encoder.h"
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -25,10 +30,22 @@
int64_t ts_start;
int64_t ts_end;
unsigned int flags;
+
+#ifdef CONFIG_SPATIAL_SVC
+ vpx_svc_parameters_t svc_params[VPX_SS_MAX_LAYERS];
+#endif
};
+// The max of past frames we want to keep in the queue.
+#define MAX_PRE_FRAMES 1
-struct lookahead_ctx;
+struct lookahead_ctx {
+ unsigned int max_sz; /* Absolute size of the queue */
+ unsigned int sz; /* Number of buffers currently in the queue */
+ unsigned int read_idx; /* Read index */
+ unsigned int write_idx; /* Write index */
+ struct lookahead_entry *buf; /* Buffer list */
+};
/**\brief Initializes the lookahead stage
*
--- a/vp9/encoder/vp9_svc_layercontext.c
+++ b/vp9/encoder/vp9_svc_layercontext.c
@@ -12,6 +12,7 @@
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
+#include "vp9/encoder/vp9_extend.h"
void vp9_init_layer_context(VP9_COMP *const cpi) {
SVC *const svc = &cpi->svc;
@@ -208,4 +209,102 @@
cpi->svc.number_temporal_layers == 1 &&
cpi->svc.spatial_layer_id > 0 &&
cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame;
+}
+
+int vp9_svc_lookahead_push(const VP9_COMP *const cpi, struct lookahead_ctx *ctx,
+ YV12_BUFFER_CONFIG *src, int64_t ts_start,
+ int64_t ts_end, unsigned int flags) {
+ struct lookahead_entry *buf;
+ int i, index;
+
+ if (vp9_lookahead_push(ctx, src, ts_start, ts_end, flags))
+ return 1;
+
+ index = ctx->write_idx - 1;
+ if (index < 0)
+ index += ctx->max_sz;
+
+ buf = ctx->buf + index;
+
+ if (buf == NULL)
+ return 1;
+
+ // Store svc parameters for each layer
+ for (i = 0; i < cpi->svc.number_spatial_layers; ++i)
+ buf->svc_params[i] = cpi->svc.layer_context[i].svc_params_received;
+
+ return 0;
+}
+
+static int copy_svc_params(VP9_COMP *const cpi, struct lookahead_entry *buf) {
+ int layer_id;
+ vpx_svc_parameters_t *layer_param;
+ vpx_enc_frame_flags_t flags;
+
+ // Find the next layer to be encoded
+ for (layer_id = 0; layer_id < cpi->svc.number_spatial_layers; ++layer_id) {
+ if (buf->svc_params[layer_id].spatial_layer >= 0)
+ break;
+ }
+
+ if (layer_id == cpi->svc.number_spatial_layers)
+ return 1;
+
+ layer_param = &buf->svc_params[layer_id];
+ buf->flags = flags = layer_param->flags;
+ cpi->svc.spatial_layer_id = layer_param->spatial_layer;
+ cpi->svc.temporal_layer_id = layer_param->temporal_layer;
+ cpi->lst_fb_idx = layer_param->lst_fb_idx;
+ cpi->gld_fb_idx = layer_param->gld_fb_idx;
+ cpi->alt_fb_idx = layer_param->alt_fb_idx;
+
+ if (vp9_set_size_literal(cpi, layer_param->width, layer_param->height) != 0)
+ return VPX_CODEC_INVALID_PARAM;
+
+ cpi->oxcf.worst_allowed_q =
+ vp9_quantizer_to_qindex(layer_param->max_quantizer);
+ cpi->oxcf.best_allowed_q =
+ vp9_quantizer_to_qindex(layer_param->min_quantizer);
+
+ vp9_change_config(cpi, &cpi->oxcf);
+
+ vp9_set_high_precision_mv(cpi, 1);
+
+ // Retrieve the encoding flags for each layer and apply them to the encoder.
+ // They include reference frame flags and update frame flags.
+ vp9_apply_encoding_flags(cpi, flags);
+
+ return 0;
+}
+
+struct lookahead_entry *vp9_svc_lookahead_peek(VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int index, int copy_params) {
+ struct lookahead_entry *buf = vp9_lookahead_peek(ctx, index);
+
+ if (buf != NULL && copy_params != 0) {
+ if (copy_svc_params(cpi, buf) != 0)
+ return NULL;
+ }
+ return buf;
+}
+
+struct lookahead_entry *vp9_svc_lookahead_pop(VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int drain) {
+ struct lookahead_entry *buf = NULL;
+
+ if (ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) {
+ buf = vp9_svc_lookahead_peek(cpi, ctx, 0, 1);
+ if (buf != NULL) {
+ // Only remove the buffer when popping the highest layer. Simply set the
+ // spatial_layer to -1 for lower layers.
+ buf->svc_params[cpi->svc.spatial_layer_id].spatial_layer = -1;
+ if (cpi->svc.spatial_layer_id == cpi->svc.number_spatial_layers - 1) {
+ vp9_lookahead_pop(ctx, drain);
+ }
+ }
+ }
+
+ return buf;
}
--- a/vp9/encoder/vp9_svc_layercontext.h
+++ b/vp9/encoder/vp9_svc_layercontext.h
@@ -28,6 +28,7 @@
struct vpx_fixed_buf rc_twopass_stats_in;
unsigned int current_video_frame_in_layer;
int is_key_frame;
+ vpx_svc_parameters_t svc_params_received;
} LAYER_CONTEXT;
typedef struct {
@@ -73,6 +74,23 @@
// Check if current layer is key frame in spatial upper layer
int vp9_is_upper_layer_key_frame(const struct VP9_COMP *const cpi);
+
+// Copy the source image, flags and svc parameters into a new framebuffer
+// with the expected stride/border
+int vp9_svc_lookahead_push(const struct VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+ int64_t ts_start, int64_t ts_end,
+ unsigned int flags);
+
+// Get the next source buffer to encode
+struct lookahead_entry *vp9_svc_lookahead_pop(struct VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int drain);
+
+// Get a future source buffer to encode
+struct lookahead_entry *vp9_svc_lookahead_peek(struct VP9_COMP *const cpi,
+ struct lookahead_ctx *ctx,
+ int index, int copy_params);
#ifdef __cplusplus
} // extern "C"
--- a/vp9/vp9_cx_iface.c
+++ b/vp9/vp9_cx_iface.c
@@ -88,8 +88,8 @@
size_t pending_frame_magnitude;
vpx_image_t preview_img;
vp8_postproc_cfg_t preview_ppcfg;
- vpx_codec_pkt_list_decl(64) pkt_list;
- unsigned int fixed_kf_cntr;
+ vpx_codec_pkt_list_decl(128) pkt_list;
+ unsigned int fixed_kf_cntr;
};
static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
@@ -795,43 +795,8 @@
return VPX_CODEC_INVALID_PARAM;
}
- if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_REF_ARF)) {
- int ref = 7;
+ vp9_apply_encoding_flags(ctx->cpi, flags);
- if (flags & VP8_EFLAG_NO_REF_LAST)
- ref ^= VP9_LAST_FLAG;
-
- if (flags & VP8_EFLAG_NO_REF_GF)
- ref ^= VP9_GOLD_FLAG;
-
- if (flags & VP8_EFLAG_NO_REF_ARF)
- ref ^= VP9_ALT_FLAG;
-
- vp9_use_as_reference(ctx->cpi, ref);
- }
-
- if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF |
- VP8_EFLAG_FORCE_ARF)) {
- int upd = 7;
-
- if (flags & VP8_EFLAG_NO_UPD_LAST)
- upd ^= VP9_LAST_FLAG;
-
- if (flags & VP8_EFLAG_NO_UPD_GF)
- upd ^= VP9_GOLD_FLAG;
-
- if (flags & VP8_EFLAG_NO_UPD_ARF)
- upd ^= VP9_ALT_FLAG;
-
- vp9_update_reference(ctx->cpi, upd);
- }
-
- if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
- vp9_update_entropy(ctx->cpi, 0);
- }
-
// Handle fixed keyframe intervals
if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) {
@@ -843,7 +808,7 @@
// Initialize the encoder instance on the first frame.
if (res == VPX_CODEC_OK && ctx->cpi != NULL) {
- unsigned int lib_flags;
+ unsigned int lib_flags = 0;
YV12_BUFFER_CONFIG sd;
int64_t dst_time_stamp, dst_end_time_stamp;
size_t size, cx_data_sz;
@@ -853,9 +818,6 @@
if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
((VP9_COMP *)ctx->cpi)->b_calculate_psnr = 1;
- // Convert API flags to internal codec lib flags
- lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
-
/* vp9 use 10,000,000 ticks/second as time stamp */
dst_time_stamp = (pts * 10000000 * ctx->cfg.g_timebase.num)
/ ctx->cfg.g_timebase.den;
@@ -865,7 +827,9 @@
if (img != NULL) {
res = image2yuvconfig(img, &sd);
- if (vp9_receive_raw_frame(ctx->cpi, lib_flags,
+ // Store the original flags into the frame buffer. The keyframe flag will
+ // be extracted when we actually encode this frame.
+ if (vp9_receive_raw_frame(ctx->cpi, flags,
&sd, dst_time_stamp, dst_end_time_stamp)) {
VP9_COMP *cpi = (VP9_COMP *)ctx->cpi;
res = update_error_state(ctx, &cpi->common.error);
@@ -874,7 +838,6 @@
cx_data = ctx->cx_data;
cx_data_sz = ctx->cx_data_sz;
- lib_flags = 0;
/* Any pending invisible frames? */
if (ctx->pending_cx_data) {
@@ -902,7 +865,12 @@
VP9_COMP *const cpi = (VP9_COMP *)ctx->cpi;
// Pack invisible frames with the next visible frame
- if (cpi->common.show_frame == 0) {
+ if (cpi->common.show_frame == 0
+#ifdef CONFIG_SPATIAL_SVC
+ || (cpi->use_svc && cpi->svc.number_temporal_layers == 1 &&
+ cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)
+#endif
+ ) {
if (ctx->pending_cx_data == 0)
ctx->pending_cx_data = cx_data;
ctx->pending_cx_data_sz += size;
@@ -925,7 +893,12 @@
/ ctx->cfg.g_timebase.num / 10000000);
pkt.data.frame.flags = lib_flags << 16;
- if (lib_flags & FRAMEFLAGS_KEY)
+ if (lib_flags & FRAMEFLAGS_KEY
+#ifdef CONFIG_SPATIAL_SVC
+ || (cpi->use_svc && cpi->svc.number_temporal_layers == 1 &&
+ cpi->svc.layer_context[0].is_key_frame)
+#endif
+ )
pkt.data.frame.flags |= VPX_FRAME_IS_KEY;
if (cpi->common.show_frame == 0) {
@@ -1165,24 +1138,19 @@
VP9_COMP *const cpi = ctx->cpi;
vpx_svc_parameters_t *const params = va_arg(args, vpx_svc_parameters_t *);
- if (params == NULL)
+ if (params == NULL || params->spatial_layer < 0 ||
+ params->spatial_layer >= cpi->svc.number_spatial_layers)
return VPX_CODEC_INVALID_PARAM;
- cpi->svc.spatial_layer_id = params->spatial_layer;
- cpi->svc.temporal_layer_id = params->temporal_layer;
+ if (params->spatial_layer == 0) {
+ int i;
+ for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
+ cpi->svc.layer_context[i].svc_params_received.spatial_layer = -1;
+ }
+ }
- cpi->lst_fb_idx = params->lst_fb_idx;
- cpi->gld_fb_idx = params->gld_fb_idx;
- cpi->alt_fb_idx = params->alt_fb_idx;
-
- if (vp9_set_size_literal(ctx->cpi, params->width, params->height) != 0)
- return VPX_CODEC_INVALID_PARAM;
-
- ctx->cfg.rc_max_quantizer = params->max_quantizer;
- ctx->cfg.rc_min_quantizer = params->min_quantizer;
-
- set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
- vp9_change_config(ctx->cpi, &ctx->oxcf);
+ cpi->svc.layer_context[params->spatial_layer].svc_params_received =
+ *params;
return VPX_CODEC_OK;
}
--- a/vpx/src/svc_encodeframe.c
+++ b/vpx/src/svc_encodeframe.c
@@ -24,6 +24,7 @@
#include "vpx/svc_context.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
+#include "vpx_mem/vpx_mem.h"
#ifdef __MINGW32__
#define strtok_r strtok_s
@@ -47,6 +48,14 @@
static const char *DEFAULT_QUANTIZER_VALUES = "60,53,39,33,27";
static const char *DEFAULT_SCALE_FACTORS = "4/16,5/16,7/16,11/16,16/16";
+// One encoded frame
+typedef struct FrameData {
+ void *buf; // compressed data buffer
+ size_t size; // length of compressed data
+ vpx_codec_frame_flags_t flags; // flags for this frame
+ struct FrameData *next;
+} FrameData;
+
typedef struct SvcInternal {
char options[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_options
char quantizers[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_quantizers
@@ -72,6 +81,7 @@
// state variables
int encode_frame_count;
+ int frame_received;
int frame_within_gop;
vpx_enc_frame_flags_t enc_frame_flags;
int layers;
@@ -78,9 +88,8 @@
int layer;
int is_keyframe;
- size_t frame_size;
- size_t buffer_size;
- void *buffer;
+ FrameData *frame_list;
+ FrameData *frame_temp;
char *rc_stats_buf;
size_t rc_stats_buf_size;
@@ -90,53 +99,37 @@
vpx_codec_ctx_t *codec_ctx;
} SvcInternal;
-// Superframe is used to generate an index of individual frames (i.e., layers)
-struct Superframe {
- int count;
- uint32_t sizes[SUPERFRAME_SLOTS];
- uint32_t magnitude;
- uint8_t buffer[SUPERFRAME_BUFFER_SIZE];
- size_t index_size;
-};
-
-// One encoded frame layer
-struct LayerData {
- void *buf; // compressed data buffer
- size_t size; // length of compressed data
- struct LayerData *next;
-};
-
-// create LayerData from encoder output
-static struct LayerData *ld_create(void *buf, size_t size) {
- struct LayerData *const layer_data =
- (struct LayerData *)malloc(sizeof(*layer_data));
- if (layer_data == NULL) {
+// create FrameData from encoder output
+static struct FrameData *fd_create(void *buf, size_t size,
+ vpx_codec_frame_flags_t flags) {
+ struct FrameData *const frame_data =
+ (struct FrameData *)vpx_malloc(sizeof(*frame_data));
+ if (frame_data == NULL) {
return NULL;
}
- layer_data->buf = malloc(size);
- if (layer_data->buf == NULL) {
- free(layer_data);
+ frame_data->buf = vpx_malloc(size);
+ if (frame_data->buf == NULL) {
+ vpx_free(frame_data);
return NULL;
}
- memcpy(layer_data->buf, buf, size);
- layer_data->size = size;
- return layer_data;
+ vpx_memcpy(frame_data->buf, buf, size);
+ frame_data->size = size;
+ frame_data->flags = flags;
+ return frame_data;
}
-// free LayerData
-static void ld_free(struct LayerData *layer_data) {
- if (layer_data) {
- if (layer_data->buf) {
- free(layer_data->buf);
- layer_data->buf = NULL;
- }
- free(layer_data);
+// free FrameData
+static void fd_free(struct FrameData *p) {
+ if (p) {
+ if (p->buf)
+ vpx_free(p->buf);
+ vpx_free(p);
}
}
-// add layer data to list
-static void ld_list_add(struct LayerData **list, struct LayerData *layer_data) {
- struct LayerData **p = list;
+// add FrameData to list
+static void fd_list_add(struct FrameData **list, struct FrameData *layer_data) {
+ struct FrameData **p = list;
while (*p != NULL) p = &(*p)->next;
*p = layer_data;
@@ -143,75 +136,17 @@
layer_data->next = NULL;
}
-// get accumulated size of layer data
-static size_t ld_list_get_buffer_size(struct LayerData *list) {
- struct LayerData *p;
- size_t size = 0;
+// free FrameData list
+static void fd_free_list(struct FrameData *list) {
+ struct FrameData *p = list;
- for (p = list; p != NULL; p = p->next) {
- size += p->size;
- }
- return size;
-}
-
-// copy layer data to buffer
-static void ld_list_copy_to_buffer(struct LayerData *list, uint8_t *buffer) {
- struct LayerData *p;
-
- for (p = list; p != NULL; p = p->next) {
- buffer[0] = 1;
- memcpy(buffer, p->buf, p->size);
- buffer += p->size;
- }
-}
-
-// free layer data list
-static void ld_list_free(struct LayerData *list) {
- struct LayerData *p = list;
-
while (p) {
list = list->next;
- ld_free(p);
+ fd_free(p);
p = list;
}
}
-static void sf_create_index(struct Superframe *sf) {
- uint8_t marker = 0xc0;
- int i;
- uint32_t mag, mask;
- uint8_t *bufp;
-
- if (sf->count == 0 || sf->count >= 8) return;
-
- // Add the number of frames to the marker byte
- marker |= sf->count - 1;
-
- // Choose the magnitude
- for (mag = 0, mask = 0xff; mag < 4; ++mag) {
- if (sf->magnitude < mask) break;
- mask <<= 8;
- mask |= 0xff;
- }
- marker |= mag << 3;
-
- // Write the index
- sf->index_size = 2 + (mag + 1) * sf->count;
- bufp = sf->buffer;
-
- *bufp++ = marker;
- for (i = 0; i < sf->count; ++i) {
- int this_sz = sf->sizes[i];
- uint32_t j;
-
- for (j = 0; j <= mag; ++j) {
- *bufp++ = this_sz & 0xff;
- this_sz >>= 8;
- }
- }
- *bufp++ = marker;
-}
-
static SvcInternal *get_svc_internal(SvcContext *svc_ctx) {
if (svc_ctx == NULL) return NULL;
if (svc_ctx->internal == NULL) {
@@ -574,8 +509,6 @@
// modify encoder configuration
enc_cfg->ss_number_layers = si->layers;
enc_cfg->ts_number_layers = 1; // Temporal layers not used in this encoder.
- // Lag in frames not currently supported
- enc_cfg->g_lag_in_frames = 0;
// TODO(ivanmaltz): determine if these values need to be set explicitly for
// svc, or if the normal default/override mechanism can be used
@@ -608,6 +541,34 @@
return VPX_CODEC_OK;
}
+static void accumulate_frame_size_for_each_layer(SvcInternal *const si,
+ const uint8_t *const buf,
+ const size_t size) {
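+ // A VP9 superframe packs one compressed frame per spatial layer and ends
+ // with an index: a marker byte, 'frames' sizes of 'mag' bytes each
+ // (little endian), and the marker byte repeated. Parse that index to
+ // attribute each compressed size to its layer.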
+ uint8_t marker = buf[size - 1];
+ if ((marker & 0xe0) == 0xc0) {
+ const uint32_t frames = (marker & 0x7) + 1;
+ const uint32_t mag = ((marker >> 3) & 0x3) + 1;
+ const size_t index_sz = 2 + mag * frames;
+
+ // Check the length before dereferencing the leading index marker so a
+ // too-small buffer cannot cause an out-of-bounds read.
+ if (size >= index_sz && buf[size - index_sz] == marker) {
+ // found a valid superframe index
+ uint32_t i, j;
+ const uint8_t *x = &buf[size - index_sz + 1];
+
+ // frames has a maximum of 8 and mag has a maximum of 4.
+ for (i = 0; i < frames; i++) {
+ uint32_t this_sz = 0;
+
+ for (j = 0; j < mag; j++)
+ this_sz |= (*x++) << (j * 8);
+ si->bytes_sum[i] += this_sz;
+ }
+ }
+ }
+}
+
// SVC Algorithm flags - these get mapped to VP8_EFLAG_* defined in vp8cx.h
// encoder should reference the last frame
@@ -846,15 +807,12 @@
vpx_codec_err_t res;
vpx_codec_iter_t iter;
const vpx_codec_cx_pkt_t *cx_pkt;
- struct LayerData *cx_layer_list = NULL;
- struct LayerData *layer_data;
- struct Superframe superframe;
+ int layer_for_psnr = 0;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || codec_ctx == NULL || si == NULL) {
return VPX_CODEC_INVALID_PARAM;
}
- memset(&superframe, 0, sizeof(superframe));
svc_log_reset(svc_ctx);
si->rc_stats_buf_used = 0;
@@ -863,7 +821,6 @@
si->frame_within_gop = 0;
}
si->is_keyframe = (si->frame_within_gop == 0);
- si->frame_size = 0;
if (rawimg != NULL) {
svc_log(svc_ctx, SVC_LOG_DEBUG,
@@ -872,124 +829,90 @@
si->frame_within_gop);
}
- // encode each layer
- for (si->layer = 0; si->layer < si->layers; ++si->layer) {
- if (svc_ctx->encoding_mode == ALT_INTER_LAYER_PREDICTION_IP &&
- si->is_keyframe && (si->layer == 1 || si->layer == 3)) {
- svc_log(svc_ctx, SVC_LOG_DEBUG, "Skip encoding layer %d\n", si->layer);
- continue;
- }
-
- if (rawimg != NULL) {
+ if (rawimg != NULL) {
+ // encode each layer
+ for (si->layer = 0; si->layer < si->layers; ++si->layer) {
+ if (svc_ctx->encoding_mode == ALT_INTER_LAYER_PREDICTION_IP &&
+ si->is_keyframe && (si->layer == 1 || si->layer == 3)) {
+ svc_log(svc_ctx, SVC_LOG_DEBUG, "Skip encoding layer %d\n", si->layer);
+ continue;
+ }
calculate_enc_frame_flags(svc_ctx);
set_svc_parameters(svc_ctx, codec_ctx);
}
+ }
- res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration,
- si->enc_frame_flags, deadline);
- if (res != VPX_CODEC_OK) {
- return res;
- }
- // save compressed data
- iter = NULL;
- while ((cx_pkt = vpx_codec_get_cx_data(codec_ctx, &iter))) {
- switch (cx_pkt->kind) {
- case VPX_CODEC_CX_FRAME_PKT: {
- const uint32_t frame_pkt_size = (uint32_t)(cx_pkt->data.frame.sz);
- si->bytes_sum[si->layer] += frame_pkt_size;
- svc_log(svc_ctx, SVC_LOG_DEBUG,
- "SVC frame: %d, layer: %d, size: %u\n",
- si->encode_frame_count, si->layer, frame_pkt_size);
- layer_data =
- ld_create(cx_pkt->data.frame.buf, (size_t)frame_pkt_size);
- if (layer_data == NULL) {
- svc_log(svc_ctx, SVC_LOG_ERROR, "Error allocating LayerData\n");
- return VPX_CODEC_OK;
- }
- ld_list_add(&cx_layer_list, layer_data);
+ res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0,
+ deadline);
+ if (res != VPX_CODEC_OK) {
+ return res;
+ }
+ // save compressed data
+ iter = NULL;
+ while ((cx_pkt = vpx_codec_get_cx_data(codec_ctx, &iter))) {
+ switch (cx_pkt->kind) {
+ case VPX_CODEC_CX_FRAME_PKT: {
+ fd_list_add(&si->frame_list, fd_create(cx_pkt->data.frame.buf,
+ cx_pkt->data.frame.sz,
+ cx_pkt->data.frame.flags));
+ accumulate_frame_size_for_each_layer(si, cx_pkt->data.frame.buf,
+ cx_pkt->data.frame.sz);
- // save layer size in superframe index
- superframe.sizes[superframe.count++] = frame_pkt_size;
- superframe.magnitude |= frame_pkt_size;
- break;
- }
- case VPX_CODEC_PSNR_PKT: {
- int i;
- svc_log(svc_ctx, SVC_LOG_DEBUG,
- "SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): "
- "%2.3f %2.3f %2.3f %2.3f \n",
- si->encode_frame_count, si->layer,
- cx_pkt->data.psnr.psnr[0], cx_pkt->data.psnr.psnr[1],
- cx_pkt->data.psnr.psnr[2], cx_pkt->data.psnr.psnr[3]);
- svc_log(svc_ctx, SVC_LOG_DEBUG,
- "SVC frame: %d, layer: %d, SSE(Total/Y/U/V): "
- "%2.3f %2.3f %2.3f %2.3f \n",
- si->encode_frame_count, si->layer,
- cx_pkt->data.psnr.sse[0], cx_pkt->data.psnr.sse[1],
- cx_pkt->data.psnr.sse[2], cx_pkt->data.psnr.sse[3]);
- for (i = 0; i < COMPONENTS; i++) {
- si->psnr_sum[si->layer][i] += cx_pkt->data.psnr.psnr[i];
- si->sse_sum[si->layer][i] += cx_pkt->data.psnr.sse[i];
- }
- break;
- }
- case VPX_CODEC_STATS_PKT: {
- size_t new_size = si->rc_stats_buf_used +
- cx_pkt->data.twopass_stats.sz;
+ svc_log(svc_ctx, SVC_LOG_DEBUG, "SVC frame: %d, kf: %d, size: %d, "
+ "pts: %d\n", si->frame_received,
+ (cx_pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? 1 : 0,
+ (int)cx_pkt->data.frame.sz, (int)cx_pkt->data.frame.pts);
- if (new_size > si->rc_stats_buf_size) {
- char *p = (char*)realloc(si->rc_stats_buf, new_size);
- if (p == NULL) {
- svc_log(svc_ctx, SVC_LOG_ERROR, "Error allocating stats buf\n");
- break;
- }
- si->rc_stats_buf = p;
- si->rc_stats_buf_size = new_size;
- }
-
- memcpy(si->rc_stats_buf + si->rc_stats_buf_used,
- cx_pkt->data.twopass_stats.buf, cx_pkt->data.twopass_stats.sz);
- si->rc_stats_buf_used += cx_pkt->data.twopass_stats.sz;
- break;
+ ++si->frame_received;
+ layer_for_psnr = 0;
+ break;
+ }
+ case VPX_CODEC_PSNR_PKT: {
+ int i;
+ svc_log(svc_ctx, SVC_LOG_DEBUG,
+ "SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): "
+ "%2.3f %2.3f %2.3f %2.3f \n",
+ si->frame_received, layer_for_psnr,
+ cx_pkt->data.psnr.psnr[0], cx_pkt->data.psnr.psnr[1],
+ cx_pkt->data.psnr.psnr[2], cx_pkt->data.psnr.psnr[3]);
+ svc_log(svc_ctx, SVC_LOG_DEBUG,
+ "SVC frame: %d, layer: %d, SSE(Total/Y/U/V): "
+ "%2.3f %2.3f %2.3f %2.3f \n",
+ si->frame_received, layer_for_psnr,
+ cx_pkt->data.psnr.sse[0], cx_pkt->data.psnr.sse[1],
+ cx_pkt->data.psnr.sse[2], cx_pkt->data.psnr.sse[3]);
+ for (i = 0; i < COMPONENTS; i++) {
+ si->psnr_sum[layer_for_psnr][i] += cx_pkt->data.psnr.psnr[i];
+ si->sse_sum[layer_for_psnr][i] += cx_pkt->data.psnr.sse[i];
}
- default: {
- break;
- }
+ ++layer_for_psnr;
+ break;
}
- }
- if (rawimg == NULL) {
- break;
- }
- }
- if (codec_ctx->config.enc->g_pass != VPX_RC_FIRST_PASS) {
- // add superframe index to layer data list
- sf_create_index(&superframe);
- layer_data = ld_create(superframe.buffer, superframe.index_size);
- ld_list_add(&cx_layer_list, layer_data);
+ case VPX_CODEC_STATS_PKT: {
+ size_t new_size = si->rc_stats_buf_used +
+ cx_pkt->data.twopass_stats.sz;
- // get accumulated size of layer data
- si->frame_size = ld_list_get_buffer_size(cx_layer_list);
- if (si->frame_size > 0) {
- // all layers encoded, create single buffer with concatenated layers
- if (si->frame_size > si->buffer_size) {
- free(si->buffer);
- si->buffer = malloc(si->frame_size);
- if (si->buffer == NULL) {
- ld_list_free(cx_layer_list);
- return VPX_CODEC_MEM_ERROR;
+ if (new_size > si->rc_stats_buf_size) {
+ char *p = (char*)realloc(si->rc_stats_buf, new_size);
+ if (p == NULL) {
+ svc_log(svc_ctx, SVC_LOG_ERROR, "Error allocating stats buf\n");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ si->rc_stats_buf = p;
+ si->rc_stats_buf_size = new_size;
}
- si->buffer_size = si->frame_size;
- }
- // copy layer data into packet
- ld_list_copy_to_buffer(cx_layer_list, (uint8_t *)si->buffer);
- ld_list_free(cx_layer_list);
-
- svc_log(svc_ctx, SVC_LOG_DEBUG, "SVC frame: %d, kf: %d, size: %d, "
- "pts: %d\n", si->encode_frame_count, si->is_keyframe,
- (int)si->frame_size, (int)pts);
+ memcpy(si->rc_stats_buf + si->rc_stats_buf_used,
+ cx_pkt->data.twopass_stats.buf, cx_pkt->data.twopass_stats.sz);
+ si->rc_stats_buf_used += cx_pkt->data.twopass_stats.sz;
+ break;
+ }
+ default: {
+ break;
+ }
}
}
+
if (rawimg != NULL) {
++si->frame_within_gop;
++si->encode_frame_count;
@@ -1004,16 +927,27 @@
return si->message_buffer;
}
-void *vpx_svc_get_buffer(const SvcContext *svc_ctx) {
- const SvcInternal *const si = get_const_svc_internal(svc_ctx);
- if (svc_ctx == NULL || si == NULL) return NULL;
- return si->buffer;
+// We maintain a list of output frame buffers because, with lag_in_frames,
+// all buffered frames have to be output at the end. vpx_svc_get_buffer()
+// removes a frame buffer from the list and keeps it in a temporary pointer,
+// which is freed on the next vpx_svc_get_buffer() call or when the encoder
+// is closed.
+void *vpx_svc_get_buffer(SvcContext *svc_ctx) {
+ SvcInternal *const si = get_svc_internal(svc_ctx);
+ if (svc_ctx == NULL || si == NULL || si->frame_list == NULL) return NULL;
+
+ if (si->frame_temp)
+ fd_free(si->frame_temp);
+
+ si->frame_temp = si->frame_list;
+ si->frame_list = si->frame_list->next;
+
+ return si->frame_temp->buf;
}
size_t vpx_svc_get_frame_size(const SvcContext *svc_ctx) {
const SvcInternal *const si = get_const_svc_internal(svc_ctx);
- if (svc_ctx == NULL || si == NULL) return 0;
- return si->frame_size;
+ if (svc_ctx == NULL || si == NULL || si->frame_list == NULL) return 0;
+ return si->frame_list->size;
}
int vpx_svc_get_encode_frame_count(const SvcContext *svc_ctx) {
@@ -1024,8 +958,8 @@
int vpx_svc_is_keyframe(const SvcContext *svc_ctx) {
const SvcInternal *const si = get_const_svc_internal(svc_ctx);
- if (svc_ctx == NULL || si == NULL) return 0;
- return si->is_keyframe;
+ if (svc_ctx == NULL || si == NULL || si->frame_list == NULL) return 0;
+ return (si->frame_list->flags & VPX_FRAME_IS_KEY) != 0;
}
void vpx_svc_set_keyframe(SvcContext *svc_ctx) {
@@ -1112,7 +1046,8 @@
// SvcInternal if it was not already allocated
si = (SvcInternal *)svc_ctx->internal;
if (si != NULL) {
- free(si->buffer);
+ fd_free(si->frame_temp);
+ fd_free_list(si->frame_list);
if (si->rc_stats_buf) {
free(si->rc_stats_buf);
}
--- a/vpx/svc_context.h
+++ b/vpx/svc_context.h
@@ -104,14 +104,16 @@
const char *vpx_svc_get_message(const SvcContext *svc_ctx);
/**
- * return size of encoded data to be returned by vpx_svc_get_buffer
+ * return size of encoded data to be returned by vpx_svc_get_buffer().
+ * It must be called before vpx_svc_get_buffer(), since fetching the buffer
+ * advances the internal frame list.
*/
size_t vpx_svc_get_frame_size(const SvcContext *svc_ctx);
/**
- * return buffer with encoded data
+ * return buffer with encoded data. The encoder maintains a list of frame
+ * buffers; each call to vpx_svc_get_buffer() returns one frame.
*/
-void *vpx_svc_get_buffer(const SvcContext *svc_ctx);
+void *vpx_svc_get_buffer(SvcContext *svc_ctx);
/**
* return size of two pass rate control stats data to be returned by