Merge pull request #209 from kaltura/BJG-MassiveBuffers
Fixed buffering, corrected initial BEF.
einatr committed Dec 31, 2015
2 parents 024960c + e1f4a1d commit 6aae7c1
Showing 3 changed files with 63 additions and 2 deletions.
2 changes: 1 addition & 1 deletion HLSPlugin/src/com/kaltura/hls/HLSIndexHandler.as
@@ -1013,7 +1013,7 @@ package com.kaltura.hls
// Get last item less MAX_SEG_BUFFER
if(segments.length > 0 && !manifest.streamEnds)
{
var bufferSeg:HLSManifestSegment = segments[Math.max(0, segments.length - HLSManifestParser.MAX_SEG_BUFFER)];
var bufferSeg:HLSManifestSegment = segments[Math.max(0, segments.length - (1 + HLSManifestParser.MAX_SEG_BUFFER))];
return initiateBestEffortRequest(bufferSeg.id, origQuality, segments, manifest);
}
else
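Note on the HLSIndexHandler change: with MAX_SEG_BUFFER segments to keep back from the live edge, the old index segments.length - MAX_SEG_BUFFER leaves only MAX_SEG_BUFFER - 1 segments ahead of the chosen starting segment; subtracting one more starts the best effort request a full MAX_SEG_BUFFER segments behind the edge. A stand-alone sketch of the arithmetic, using illustrative values rather than the plugin's defaults:

package
{
    import flash.display.Sprite;

    // Hypothetical check of the buffer-segment index arithmetic; the values of
    // MAX_SEG_BUFFER and the segment count below are illustrative only.
    public class BufferSegIndexCheck extends Sprite
    {
        public function BufferSegIndexCheck()
        {
            const MAX_SEG_BUFFER:int = 3;  // assumed value for illustration
            const segmentCount:int = 10;   // live playlist with segments 0..9

            // Old expression: starting segment is only MAX_SEG_BUFFER - 1 behind live.
            var oldIndex:int = int(Math.max(0, segmentCount - MAX_SEG_BUFFER));
            // New expression: starting segment is exactly MAX_SEG_BUFFER behind live.
            var newIndex:int = int(Math.max(0, segmentCount - (1 + MAX_SEG_BUFFER)));

            trace("old index:", oldIndex, "segments ahead:", segmentCount - 1 - oldIndex); // 7, 2
            trace("new index:", newIndex, "segments ahead:", segmentCount - 1 - newIndex); // 6, 3
        }
    }
}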
8 changes: 8 additions & 0 deletions HLSPlugin/src/com/kaltura/hls/manifest/HLSManifestParser.as
@@ -45,6 +45,14 @@ package com.kaltura.hls.manifest
*/
public static var SEND_LOGS:Boolean = false;

/**
* When true, we seek to live edge when we experience a buffering event.
*
* Seeking will occur if we are buffering for longer than 2x target
* duration of the current manifest.
*/
public static var ALWAYS_SEEK_TO_LIVE_EDGE_ON_BUFFER:Boolean = false;

/**
* Keep this many segments back from the live edge in DVR/Live streams.
*
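The new flag defaults to false, so the live-edge seek behaviour is strictly opt-in. A minimal sketch of how an embedding player might enable it before starting playback; the enclosing class is hypothetical, only the flag itself comes from this commit:

package
{
    import com.kaltura.hls.manifest.HLSManifestParser;

    // Hypothetical bootstrap code for a live player.
    public class LivePlayerConfig
    {
        public static function applyLiveDefaults():void
        {
            // Opt in: after roughly twice the target segment duration spent
            // buffering, the stream seeks back to the live edge instead of
            // stalling behind it.
            HLSManifestParser.ALWAYS_SEEK_TO_LIVE_EDGE_ON_BUFFER = true;
        }
    }
}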
55 changes: 54 additions & 1 deletion HLSPlugin/src/org/osmf/net/httpstreaming/HLSHTTPNetStream.as
@@ -866,6 +866,16 @@ package org.osmf.net.httpstreaming
return result;
}

private function onJumpToLiveEdgeTimer(e:*):void
{
CONFIG::LOGGING
{
logger.debug("onJumpToLiveEdgeTimer - firing live edge seek.");
}

seek(Number.MAX_VALUE);
}

/**
* @private
*
@@ -880,7 +890,7 @@

switch(event.info.code)
{
case NetStreamCodes.NETSTREAM_BUFFER_EMPTY:

// Only apply bias after our first buffering event.
if(bufferBias < HLSManifestParser.BUFFER_EMPTY_MAX_INCREASE && _state != HTTPStreamingState.HALT && neverBuffered == false)
@@ -893,6 +903,26 @@
}
}

if(HLSManifestParser.ALWAYS_SEEK_TO_LIVE_EDGE_ON_BUFFER)
{
CONFIG::LOGGING
{
logger.debug("ALWAYS_SEEK_TO_LIVE_EDGE_ON_BUFFER is active, turning on timer.");
}

if(jumpToLiveEdgeTimer)
{
jumpToLiveEdgeTimer.stop();
jumpToLiveEdgeTimer.start();
}
else
{
// Wait until we've been buffering for more than 2 segments to jump ahead.
jumpToLiveEdgeTimer = new Timer(indexHandler.getTargetSegmentDuration() * 2000);
jumpToLiveEdgeTimer.addEventListener(TimerEvent.TIMER, onJumpToLiveEdgeTimer);
}
}

neverBuffered = false;
emptyBufferInterruptionSinceLastQoSUpdate = true;
_wasBufferEmptied = true;
@@ -919,6 +949,12 @@
{
logger.debug("Received NETSTREAM_BUFFER_FULL. _wasBufferEmptied = "+_wasBufferEmptied+" bufferLength "+this.bufferLength);
}

if(jumpToLiveEdgeTimer)
{
jumpToLiveEdgeTimer.stop();
}

break;

case NetStreamCodes.NETSTREAM_BUFFER_FLUSH:
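Taken together, the HLSHTTPNetStream changes arm a watchdog timer on NETSTREAM_BUFFER_EMPTY with a period of twice the target segment duration (getTargetSegmentDuration() * 2000 ms), stop it on NETSTREAM_BUFFER_FULL, and call seek(Number.MAX_VALUE) if it fires, which lands playback on the live edge. Below is a condensed, hypothetical restatement of that lifecycle; every name apart from the Timer API is illustrative, and it starts a freshly created timer immediately, whereas in the hunk above a newly created timer is only started by a subsequent buffering event:

package
{
    import flash.events.TimerEvent;
    import flash.utils.Timer;

    // Hypothetical condensed form of the live-edge recovery lifecycle.
    public class LiveEdgeWatchdog
    {
        private var timer:Timer;
        private var targetDuration:Number;    // target segment duration in seconds
        private var seekToLiveEdge:Function;  // e.g. function():void { netStream.seek(Number.MAX_VALUE); }

        public function LiveEdgeWatchdog(targetDuration:Number, seekToLiveEdge:Function)
        {
            this.targetDuration = targetDuration;
            this.seekToLiveEdge = seekToLiveEdge;
        }

        // On NETSTREAM_BUFFER_EMPTY: (re)arm a timer that fires after twice
        // the target segment duration spent buffering.
        public function onBufferEmpty():void
        {
            if (!timer)
            {
                timer = new Timer(targetDuration * 2000);
                timer.addEventListener(TimerEvent.TIMER, onTimer);
            }
            timer.reset();
            timer.start(); // assumption: the watchdog should run from the first buffering event
        }

        // On NETSTREAM_BUFFER_FULL: playback recovered, disarm the watchdog.
        public function onBufferFull():void
        {
            if (timer)
                timer.stop();
        }

        private function onTimer(e:TimerEvent):void
        {
            // Still buffering after 2x target duration; snap to the live edge.
            seekToLiveEdge();
        }
    }
}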
@@ -2436,6 +2472,21 @@
*/
private function keepBufferFed():void
{
// We have to make sure we don't cease buffering until we exceed the
// minimum buffer time - since we spoonfeed tags we have to do this
// rule manually here.
if(_wasBufferEmptied)
{
if(bufferLength < _desiredBufferTime_Min)
{
CONFIG::LOGGING
{
logger.debug("keepBufferFed - waiting until " + bufferLength + " >= " + _desiredBufferTime_Min + " to resume writing tags.");
}
return;
}
}

// Check the actual amount of content present.
if(super.bufferLength >= bufferFeedMin && !_wasBufferEmptied)
{
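The keepBufferFed() addition gates tag delivery after an empty-buffer event: tags are withheld until bufferLength climbs back to _desiredBufferTime_Min, so playback does not resume with a sliver of data and immediately stall again. A stand-alone restatement of the gate, with illustrative thresholds in the comment:

package
{
    // Hypothetical restatement of the refill gate; real values come from
    // bufferLength and _desiredBufferTime_Min at runtime.
    public class BufferRefillGate
    {
        public static function shouldResumeFeedingTags(bufferLength:Number,
                                                       desiredBufferMin:Number,
                                                       wasBufferEmptied:Boolean):Boolean
        {
            // After an empty-buffer event, hold back tags until the minimum
            // buffer time is rebuilt (e.g. 1.8s against a 4s minimum -> false).
            return !(wasBufferEmptied && bufferLength < desiredBufferMin);
        }
    }
}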
@@ -3190,5 +3241,7 @@
public static var reloadDelayTime:int = 2500; // The amount of time (in milliseconds) we want to wait between reload attempts in case of a URL error

private static const HIGH_PRIORITY:int = int.MAX_VALUE;

private var jumpToLiveEdgeTimer:Timer;
}
}
