=== modified file 'src/server/compositor/buffer_queue.cpp'
--- src/server/compositor/buffer_queue.cpp	2016-02-12 04:02:11 +0000
+++ src/server/compositor/buffer_queue.cpp	2016-02-15 08:26:32 +0000
@@ -137,7 +137,7 @@
     graphics::BufferProperties const& props,
     mc::FrameDroppingPolicyFactory const& policy_provider)
     : nbuffers{nbuffers},
-      frame_deadlines_threshold{-1},  // Disable scaling by default
+      frame_deadlines_threshold{30},  // around half a second at 60Hz, to be sure the client keeps up
       frame_deadlines_met{0},
       scheduled_extra_frames{0},
       frame_dropping_enabled{false},

=== modified file 'tests/acceptance-tests/test_latency.cpp'
--- tests/acceptance-tests/test_latency.cpp	2016-02-10 04:27:20 +0000
+++ tests/acceptance-tests/test_latency.cpp	2016-02-15 08:26:32 +0000
@@ -190,6 +190,16 @@
         return max;
     }
 
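+    // Peak latency observed from frame 'index' onward (skips warm-up).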
+    unsigned int max_latency_from_frame(unsigned int index) const
+    {
+        unsigned int max = 0;
+        for (auto i = index; i < latency_list.size(); ++i)
+            if (latency_list[i] > max)
+                max = latency_list[i];
+        return max;
+    }
+
 private:
     std::mutex mutex;
     std::vector<unsigned int> latency_list;
@@ -246,12 +256,10 @@
     ASSERT_TRUE(stats.wait_for_posts(test_submissions,
                                      std::chrono::seconds(60)));
 
-    // Note: Using the "early release" optimization without dynamic queue
-    //       scaling enabled makes the expected latency possibly up to
-    //       nbuffers instead of nbuffers-1. After dynamic queue scaling is
-    //       enabled, the average will be lower than this.
-    float const expected_max_latency = expected_client_buffers;
-    float const expected_min_latency = expected_client_buffers - 1;
+    // Verify the server is performing as expected:
+    float const expected_max_latency = expected_client_buffers - 1;
+    // And a sanity check that the test itself isn't broken:
+    float const expected_min_latency = 1;
 
     auto observed_latency = display.group.average_latency();
 
@@ -259,6 +267,46 @@
     EXPECT_THAT(observed_latency, Lt(expected_max_latency+error_margin));
 }
 
+TEST_F(ClientLatency, latency_scales_down_for_a_smooth_client)
+{
+    using namespace testing;
+
+    auto stream = mir_surface_get_buffer_stream(surface);
+    for (auto i = 0u; i < test_submissions; ++i) {
+        auto submission_id = mir_debug_surface_current_buffer_id(surface);
+        stats.record_submission(submission_id);
+        mir_buffer_stream_swap_buffers_sync(stream);
+    }
+
+    ASSERT_TRUE(stats.wait_for_posts(test_submissions,
+                                     std::chrono::seconds(60)));
+
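+    // Skip the first 50 frames: scaling requires 30 met deadlines first.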
+    EXPECT_THAT(display.group.max_latency_from_frame(50), Le(1u));
+}
+
+TEST_F(ClientLatency, latency_doesnt_scale_down_for_a_stuttery_client)
+{
+    using namespace testing;
+
+    auto stream = mir_surface_get_buffer_stream(surface);
+    for (auto i = 0u; i < test_submissions; ++i) {
+        auto submission_id = mir_debug_surface_current_buffer_id(surface);
+        stats.record_submission(submission_id);
+        mir_buffer_stream_swap_buffers_sync(stream);
+
+        // Stutter: stall for two vblanks every tenth frame
+        if (i % 10 == 0)
+            std::this_thread::sleep_for(vblank_interval * 2);
+    }
+
+    ASSERT_TRUE(stats.wait_for_posts(test_submissions,
+                                     std::chrono::seconds(60)));
+
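+    // Latency should still reach at least 2 after frame 50 (no scale-down).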
+    EXPECT_THAT(display.group.max_latency_from_frame(50), Ge(2u));
+}
+
 TEST_F(ClientLatency, latency_is_limited_to_nbuffers)
 {
     using namespace testing;

