forked from mirrors/gecko-dev
When writing to the alt-data output stream fails for whatever reason, we now try to truncate the alternative data and keep the original data instead of dooming the whole entry. The patch also changes how the predicted size is passed to the cache. Instead of a dedicated method, it's now an argument of the openOutputStream and openAlternativeOutputStream methods, which fail in case the entry would exceed the allowed limit.
44 lines
1.3 KiB
JavaScript
44 lines
1.3 KiB
JavaScript
// Generate a 200 KiB (204800-character) test payload: a 25-character seed
// repeated 8192 times (25 * 8192 = 204800).
function gen_200k()
{
  const seed = "0123456789ABCDEFGHIJLKMNO";
  return seed.repeat(8192);
}
|
|
|
|
// Keep the output stream of the first entry in a global variable, so the
// CacheFile and its buffer isn't released before we write the data to the
// second entry.
var oStr;
|
|
|
|
// Entry point for this xpcshell test. Fills one cache entry so its chunk
// buffer stays in memory (via the global oStr), then verifies that writing
// a second entry of the same size fails with NS_ERROR_OUT_OF_MEMORY once
// the chunk-memory limit is exhausted.
function run_test()
{
  do_get_profile();

  // Set max chunks memory so that only one full chunk fits within the limit.
  var prefs = Cc["@mozilla.org/preferences-service;1"]
                .getService(Ci.nsIPrefBranch);
  prefs.setIntPref("browser.cache.disk.max_chunks_memory_usage", 300);

  asyncOpenCacheEntry("http://a/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
    function(firstStatus, firstEntry) {
      Assert.equal(firstStatus, Cr.NS_OK);

      // Write the full payload to the first entry. oStr is global on
      // purpose: it keeps the CacheFile and its buffer alive.
      var data = gen_200k();
      oStr = firstEntry.openOutputStream(0, data.length);
      Assert.equal(data.length, oStr.write(data, data.length));

      asyncOpenCacheEntry("http://b/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
        function(secondStatus, secondEntry) {
          Assert.equal(secondStatus, Cr.NS_OK);

          // With the chunk-memory budget consumed by the first entry,
          // writing the same amount again must throw.
          var oStr2 = secondEntry.openOutputStream(0, data.length);
          do_check_throws_nsIException(() => oStr2.write(data, data.length),
                                       'NS_ERROR_OUT_OF_MEMORY');
          finish_cache2_test();
        }
      );
    }
  );

  do_test_pending();
}
|