
PING [PATCH] have __builtin_object_size handle POINTER_PLUS with non-const offset (pr 77608)

Message ID 598f7f11-5311-ea99-3826-146914148e05@gmail.com
State New

Commit Message

Martin Sebor Nov. 21, 2016, 11:53 p.m. UTC
Richard,

Attached is a lightly updated patch mostly with just clarifying
comments and a small bug fix.  I'd appreciate your input (please
see my reply and questions below).  I'm hoping to finalize this
patch based on your feedback so it can be committed soon.

   https://gcc.gnu.org/ml/gcc-patches/2016-11/msg01127.html

Thanks
Martin

On 11/11/2016 08:56 AM, Martin Sebor wrote:
> Thanks for the review and comments!

>

>>

>> @@ -158,14 +170,149 @@ compute_object_offset (const_tree expr,

>> const_tree var)

>>    return size_binop (code, base, off);

>>  }

>>

>> +static bool

>> +operand_unsigned_p (tree op)

>> +{

>> +  if (TREE_CODE (op) == SSA_NAME)

>>

>> new functions need a comment.  But maybe you want to use

>> tree_expr_nonnegative_p

>> to also allow signed but known positive ones?

>

> Let me add a comment.

>

> operand_unsigned_p returns true if the type of the original offset

> before conversion to sizetype is unsigned.  tree_expr_nonnegative_p

> returns true if the argument's type is unsigned, which is always

> the case here.
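>
> To illustrate the distinction (a sketch, not a case from the patch):
> given
>
>    char *f (char *p, int i)
>    {
>      return p + i;
>    }
>
> the POINTER_PLUS_EXPR offset is (sizetype) i, so looking at the type
> of the offset itself always says unsigned; operand_unsigned_p instead
> looks through the conversion at the type of I, which is signed here.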

>

>>

>> +/* Fill the 2-element OFFRANGE array with the range of values OFF

>> +   is known to be in.  Postcondition: OFFRANGE[0] <= OFFRANGE[1].  */

>> +

>> +static bool

>> +get_offset_range (tree off, HOST_WIDE_INT offrange[2])

>> +{

>> +  STRIP_NOPS (off);

>>

>> why strip nops (even sign changes!) here?

>

> That might be a leftover from an earlier/failed attempt to simplify

> things that I forgot to remove.  Let me do that in a followup patch.

> Unless I misunderstand your comment there are no sign changes (AFAIK)

> because the offset is always represented as sizetype.

>

>> Why below convert things

>> via to_uhwi when offrange is of type signed HOST_WIDE_INT[2]?

>

> The offset is always represented as sizetype but the code treats

> it as signed because in reality it can be negative.  That said,

> I don't find dealing with ranges very intuitive so I could very

> well be missing something and there may be a better way to code

> this.  I welcome suggestions.
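>
> As a (hypothetical) illustration of why the range is interpreted as
> signed:
>
>    char *g (char *p)
>    {
>      return p - 1;
>    }
>
> is represented roughly as p_2 = p_1(D) + 18446744073709551615 on
> an LP64 target, i.e., the POINTER_PLUS_EXPR offset is sizetype and
> a logically negative offset shows up as a huge unsigned constant.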

>

>>

>> +         gimple *def = SSA_NAME_DEF_STMT (off);

>> +         if (is_gimple_assign (def))

>> +           {

>> +             tree_code code = gimple_assign_rhs_code (def);

>> +             if (code == PLUS_EXPR)

>> +               {

>> +                 /* Handle offset in the form VAR + CST where VAR's type

>> +                    is unsigned so the offset must be the greater of

>> +                    OFFRANGE[0] and CST.  This assumes the PLUS_EXPR

>> +                    is in a canonical form with CST second.  */

>> +                 tree rhs2 = gimple_assign_rhs2 (def);

>>

>> err, what?  What about overflow?  Aren't you just trying to decompose

>> 'off' into a variable and a constant part here and somehow extracting a

>> range for the variable part?  So why not just do that?

>

> Sorry, what about overflow?

>

> The purpose of this code is to handle cases of the form

>

>    & PTR [range (MIN, MAX)] + CST

>

> where CST is unsigned implying that the lower bound of the offset

> is the greater of CST and MIN.  For instance, in the following it

> determines that bos(p, 0) is 4 (and if the 3 were greater than 7

> and overflowed the addition the result would be zero).  I'm not

> sure I understand what you suggest I do differently to make this

> work.

>

>    char d[7];

>

>    #define bos(p, t) __builtin_object_size (p, t)

>

>    long f (unsigned i)

>    {

>      if (2 < i) i = 2;

>

>      char *p = &d[i] + 3;

>

>      return bos (p, 0);

>    }

>>

>> +      else if (range_type == VR_ANTI_RANGE)

>> +       {

>> +         offrange[0] = max.to_uhwi () + 1;

>> +         offrange[1] = min.to_uhwi () - 1;

>> +         return true;

>> +       }

>>

>> first of all, how do you know it fits uhwi?  Second, from ~[5, 9] you get

>> [10, 4] !?  That looks bogus (and contrary to the function comment

>> postcondition)

>

> I admit I have some trouble working with anti-ranges.  It's also

> difficult to fully exercise them in this pass because it only runs

> after EVRP but not after VRP1 (except with -g), so only limited

> range information is available. (I'm hoping to eventually change

> it but moving the passes broke a test in a way that seemed too

> complex to fix for this project).

>

> The code above is based on the observation that an anti-range is

> often used to represent the full subrange of a narrower signed type

> like signed char (as ~[128, -129]).  I haven't been able to create

> an anti-range like ~[5, 9]. When/how would a range like that come

> about (so I can test it and implement the above correctly)?
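>
> (Presumably something along these lines could produce one, though
> I haven't seen the anti-range form in the dumps myself:
>
>    char *h (char *p, int i)
>    {
>      if (i < 5 || i > 9)
>        return p + i;
>      return 0;
>    }
>
> where, at the first return, I is in [INT_MIN, 4] or [10, INT_MAX],
> which VRP can represent as ~[5, 9].)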

>

>>

>> +      else if (range_type == VR_VARYING)

>> +       {

>> +         gimple *def = SSA_NAME_DEF_STMT (off);

>> +         if (is_gimple_assign (def))

>> +           {

>> +             tree_code code = gimple_assign_rhs_code (def);

>> +             if (code == NOP_EXPR)

>> +               {

>>

>> please trust range-info instead of doing your own little VRP here.

>> VR_VARYING -> return false.

>

> I would prefer to rely on the range information and not have to work

> around it like I do here but, unfortunately, it doesn't always appear

> to be available.  For example, in the following test case:

>

>    char d[130];

>

>    #define bos(p, t) __builtin_object_size (p, t)

>

>    void f (void*);

>

>    void g (signed char i)

>    {

>       char *p = &d [i] + 128;

>      f(__builtin___memset_chk (p, '*', 2, bos (p, 0)));   // okay

>

>      f(__builtin___memset_chk (p, '*', 3, bos (p, 0)));   // overflow

>    }

>

> the range type is VR_VARYING but the code makes it possible to diagnose

> the overflow in the second call to memset.

>

> Maybe the poor range info is a consequence of the pass only benefiting

> from EVRP and not VRP?

>

>>

>> stopping review here noting that other parts of the compiler

>> split constant parts from variable parts and it looks like this is

>> what you want to do here?  That is, enhance

>>

>> static vec<unsigned HOST_WIDE_INT> object_sizes[4];

>>

>> and cache a SSA-NAME, constant offset pair in addition?  Or just a range

>> (range of that SSA name + offset), if that's good enough -- wait, that's

>> what you get from get_range_info!

>

> I agree that storing the offsets could be an enhancement to consider.

>  The patch mentions it in the FIXME part of the following comment:

>

> /* Bitmaps of variables whose object sizes have been computed without

>    regard to the (non-constant) offset into them.  A bit in each bitmap

>    is valid only if the corresponding bit in the COMPUTED bitmap is

>    non-zero (otherwise it's zero).

>    FIXME: Change this to a vector of offset ranges to make it possible

>    to handle cases like &A[I] + J where both I and J are non-const and

>    bounded by some range.  */

> static bitmap nonconst_offsets[4];

>

> It's just something I haven't had time to work on yet and with the

> close of stage 1 approaching I wanted to put out this version for

> review.  Do you view this enhancement as prerequisite for approving

> the patch or is it something that you'd be fine with adding later?
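>
> For the record, a rough sketch of what such a cached entry might look
> like if/when the enhancement is made (names are just placeholders):
>
>    struct object_size_entry
>    {
>      /* Size of the object with the non-constant offset disregarded.  */
>      unsigned HOST_WIDE_INT size;
>      /* Range of the disregarded offset.  */
>      HOST_WIDE_INT offrange[2];
>    };
>
>    static vec<object_size_entry> object_size_entries[4];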

>

>>

>> So I'm not sure where the whole complication in the patch comes from...

>>

>> Possibly from the fact that VRP on pointers is limited and thus &a[i] + 5

>> doesn't get you a range for i + 5?

>

> get_range_info doesn't accept pointers at all (perhaps that's what

> you meant by "VRP on pointers is limited").  In Gimple, &a[i] + 5

> is represented as just that (i.e., _1 = &a[i_3]; p_6 = _1 + 5;) and

> so to get the right answer for bos(&a[i] + 5) the patch jumps though

> some hoops.  But again, I could very well be missing something that's

> obvious to you so if you think that's the case I'd be happy to simplify

> the code if you point me in the right direction.

>

> Martin

Patch

PR middle-end/77608 - missing protection on trivially detectable runtime buffer overflow

gcc/ChangeLog:
2016-11-07  Martin Sebor  <msebor@redhat.com>

	PR middle-end/77608
	* tree-object-size.c (nonconst_offsets): New global.
	(operand_unsigned_p, get_offset_range): New functions.
	(addr_object_size): Add an argument.
	(compute_builtin_object_size): Factor body out into...
	(internal_builtin_object_size): ...this new function.
	(expr_object_size): Adjust.
	(merge_object_sizes): Handle non-constant offset.
	(plus_stmt_object_size): Same.
	(collect_object_sizes_for): Change to return bool.
	(init_object_sizes): Initialize nonconst_offsets.
	(fini_object_sizes): Release nonconst_offsets.

gcc/testsuite/ChangeLog:
2016-11-07  Martin Sebor  <msebor@redhat.com>

	PR middle-end/77608
	* gcc.c-torture/execute/builtins/lib/chk.c: Add debugging output.
	* gcc.c-torture/execute/builtins/mempcpy-chk.c: Add debugging output.
	(test5): Allow test to emit __mempcpy_chk.
	* gcc.dg/builtin-object-size-18.c: New test.
	* gcc.dg/builtin-stringop-chk-3.c: New test.
	* gcc.dg/pr77608.c: New test.

diff --git a/gcc/testsuite/gcc.c-torture/execute/builtins/lib/chk.c b/gcc/testsuite/gcc.c-torture/execute/builtins/lib/chk.c
index b19d7bf..6966b41 100644
--- a/gcc/testsuite/gcc.c-torture/execute/builtins/lib/chk.c
+++ b/gcc/testsuite/gcc.c-torture/execute/builtins/lib/chk.c
@@ -5,6 +5,15 @@ 
 
 extern void abort (void);
 
+const char *testfunc;
+int testline;
+
+#define abort()								\
+  (__builtin_printf ("%s:%i: %s: test failure in %s on line %i\n",	\
+		     __FILE__, __LINE__, __FUNCTION__,			\
+		     testfunc, testline),				\
+   __builtin_abort ())
+
 extern int inside_main;
 void *chk_fail_buf[256] __attribute__((aligned (16)));
 volatile int chk_fail_allowed, chk_calls;
diff --git a/gcc/testsuite/gcc.c-torture/execute/builtins/mempcpy-chk.c b/gcc/testsuite/gcc.c-torture/execute/builtins/mempcpy-chk.c
index 7a1737c..b014aff 100644
--- a/gcc/testsuite/gcc.c-torture/execute/builtins/mempcpy-chk.c
+++ b/gcc/testsuite/gcc.c-torture/execute/builtins/mempcpy-chk.c
@@ -9,6 +9,15 @@  extern void *memcpy (void *, const void *, size_t);
 extern void *mempcpy (void *, const void *, size_t);
 extern int memcmp (const void *, const void *, size_t);
 
+extern const char *testfunc;
+extern int testline;
+
+#define memcpy(d, s, n)				\
+  (testfunc = __func__, testline = __LINE__, memcpy (d, s, n))
+
+#define mempcpy(d, s, n)			\
+  (testfunc = __func__, testline = __LINE__, mempcpy (d, s, n))
+
 #include "chk.h"
 
 const char s1[] = "123";
@@ -17,6 +26,8 @@  volatile char *s2 = "defg"; /* prevent constant propagation to happen when whole
 volatile char *s3 = "FGH"; /* prevent constant propagation to happen when whole program assumptions are made.  */
 volatile size_t l1 = 1; /* prevent constant propagation to happen when whole program assumptions are made.  */
 
+#define abort() (__builtin_printf ("failure on line %i\n", __LINE__), __builtin_abort ())
+
 void
 __attribute__((noinline))
 test1 (void)
@@ -326,6 +337,8 @@  test4 (void)
   char buf3[20];
 
   chk_fail_allowed = 1;
+  mempcpy_disallowed = 0;
+
   /* Runtime checks.  */
   if (__builtin_setjmp (chk_fail_buf) == 0)
     {
@@ -343,7 +356,9 @@  test4 (void)
       vx = mempcpy (&buf3[19], "ab", 2);
       abort ();
     }
+
   chk_fail_allowed = 0;
+  mempcpy_disallowed = 1;
 }
 
 #ifndef MAX_OFFSET
@@ -377,6 +392,12 @@  test5 (void)
   int off1, off2, len, i;
   char *p, *q, c;
 
+  /* The call to mempcpy below, even though it's safe, may result in
+     a call to __mempcpy_chk when the (maximum) size of the destination
+     is determined.  */
+  int mempcpy_disallowed_save = mempcpy_disallowed;
+  mempcpy_disallowed = 0;
+
   for (off1 = 0; off1 < MAX_OFFSET; off1++)
     for (off2 = 0; off2 < MAX_OFFSET; off2++)
       for (len = 1; len < MAX_COPY; len++)
@@ -410,6 +431,8 @@  test5 (void)
 	    if (*q != 'a')
 	      abort ();
 	}
+
+  mempcpy_disallowed = mempcpy_disallowed_save;
 }
 
 #define TESTSIZE 80
diff --git a/gcc/testsuite/gcc.dg/builtin-object-size-18.c b/gcc/testsuite/gcc.dg/builtin-object-size-18.c
new file mode 100644
index 0000000..b1a5666
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-object-size-18.c
@@ -0,0 +1,115 @@ 
+/* PR 77608 - missing protection on trivially detectable runtime buffer
+   overflow */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+
+#define concat(a, b)   a ## b
+#define CAT(a, b)      concat (a, b)
+
+/* Create a symbol name unique to each test and object size type.  */
+#define SYM(store, type)  \
+  CAT (CAT (CAT (CAT (CAT (failure_on_line_, __LINE__), \
+		      _), store), _type_), type)
+
+/* References to the following undefined symbol which is unique for each
+   test case are expected to be eliminated.  */
+#define TEST_FAILURE(store, type)			\
+  do {							\
+    extern void SYM (store, type)(void);		\
+    SYM (store, type)();				\
+  } while (0)
+
+#define bos(ptr, type) __builtin_object_size (ptr, type)
+
+#define test(expect, type, ptr, store)		\
+  do {						\
+    if (bos (ptr, type)	!= expect)		\
+      TEST_FAILURE (store, type);		\
+  } while (0)
+
+/* Verify that each of the expressions __builtin_object_size(OBJ, n)
+   is folded into Rn.  */
+#define fold(r0, r1, r2, r3, ptr, store)	\
+  do {						\
+    test (r0, 0, ptr, store);			\
+    test (r1, 1, ptr, store);			\
+    test (r2, 2, ptr, store);			\
+    test (r3, 3, ptr, store);			\
+  } while (0)
+
+/* Verify that __builtin_object_size(xBUF + OFF, n) is folded into Rn
+   for the specified OFF and each of the Rn values.  */
+#define FOLD(ptr, r0, r1, r2, r3)		\
+  do {						\
+    {						\
+      char *buf = gbuf;				\
+      fold (r0, r1, r2, r3, ptr, static);	\
+      sink (buf);				\
+    }						\
+    {						\
+      char *buf = lbuf;				\
+      fold (r0, r1, r2, r3, ptr, auto);		\
+      sink (buf);				\
+    }						\
+    {						\
+      char *buf = abuf;				\
+      fold (r0, r1, r2, r3, ptr, allocated);	\
+      sink (buf);				\
+    }						\
+  } while (0)
+
+#define SCHAR_MAX __SCHAR_MAX__
+#define UCHAR_MAX (__SCHAR_MAX__  * 2 + 1)
+#define N         ((UCHAR_MAX + 1) * 2)
+
+typedef __SIZE_TYPE__ size_t;
+
+void sink (void*, ...);
+
+char gbuf[N];
+
+void test_pointer_plus (char ci, short si, int i, long li)
+{
+  char lbuf[N];
+
+  char *abuf = __builtin_malloc (N);
+
+  FOLD (buf + ci, N, N, N - SCHAR_MAX, N - SCHAR_MAX);
+  FOLD (buf + si, N, N, 0, 0);
+  FOLD (buf +  i, N, N, 0, 0);
+  FOLD (buf + li, N, N, 0, 0);
+
+  FOLD (&buf [1] + ci, N - 1, N - 1, N - UCHAR_MAX - 1, N - UCHAR_MAX - 1);
+}
+
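+/* Return an integer in the range [MIN, MAX].  */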
+static inline int
+r (int min, int max)
+{
+  extern int rand (void);
+  int x = rand ();
+  return x < min || max < x ? min : x;
+}
+
+void test_pointer_plus_range (void)
+{
+  char lbuf[N];
+
+  char *abuf = __builtin_malloc (N);
+
+  int i = r (0, 1);
+  FOLD (buf + i, N, N, N - 1, N - 1);
+}
+
+void test_pointer_plus_anti_range (void)
+{
+}
+
+static inline int
+ar (int min, int max)
+{
+  extern int rand (void);
+  int x = rand ();
+  return x < min || max < x ? x : min;
+}
+
+/* { dg-final { scan-tree-dump-not "failure_on_line" "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/builtin-stringop-chk-3.c b/gcc/testsuite/gcc.dg/builtin-stringop-chk-3.c
new file mode 100644
index 0000000..064bfe9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/builtin-stringop-chk-3.c
@@ -0,0 +1,349 @@ 
+/* Test exercising buffer overflow warnings emitted for __*_chk builtins
+   in cases where the destination involves a non-constant offset into
+   an object of known size.  */
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftrack-macro-expansion=0" } */
+
+#define INT_MAX      __INT_MAX__
+#define PTRDIFF_MAX  __PTRDIFF_MAX__
+#define SIZE_MAX     __SIZE_MAX__
+
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+typedef __SIZE_TYPE__    size_t;
+
+#define _CAT(x, y)   x ## y
+#define CAT(x, y)    _CAT (x, y)
+#define VAR(x)        CAT (x, __LINE__)
+
+void sink (void*);
+
+/* Define memcpy as a macro (as opposed to an inline function) so that
+   warnings point to its invocation in the tests (as opposed to its
+   definition), making sure its first argument is evaluated exactly
+   once.  */
+#define memcpy(d, s, n)							\
+  do {									\
+    __typeof__ (d) VAR (p) = (d);					\
+    __builtin___memcpy_chk (VAR (p), s, n,				\
+			    __builtin_object_size (VAR (p), 1));	\
+    sink (VAR (p));							\
+  } while (0)
+
+/* Function to generate a unique offset each time it's called.  Declared
+   (but not defined) and used to prevent GCC from making assumptions about
+   the offsets' values based on how the variables are used in the tested
+   expressions.  */
+ptrdiff_t random_value (void);
+
+/* For brevity. */
+#define X() random_value ()
+
+/* Test memcpy with a variable offset not known to be in any range
+   and with a constant number of bytes.  */
+
+void test_var_const (const void *p)
+{
+  char buf[5];
+
+  memcpy (buf + X(), p, 6);  /* { dg-warning "writing 6 bytes into a region of size 5" } */
+
+  memcpy (&buf[X()], p, 6);  /* { dg-warning "writing" } */
+
+  /* Since X() below can be assumed to be non-negative (otherwise it would
+     result in forming a pointer before the beginning of BUF), and because
+     of the +1 added to the resulting address, there is at most enough
+     room for (sizeof(buf) - 1) or 4 bytes.  */
+  memcpy (&buf[X()] + 1, p, 4);
+
+  memcpy (&buf[X()] + 1, p, 5);  /* { dg-warning "writing" } */
+
+  memcpy (&buf[X()] + 5, p, 6);  /* { dg-warning "writing" } */
+
+  /* The negative constant offset below must have no effect on the maximum
+     size of the buffer.  */
+  memcpy (&buf[X()] - 1, p, 5);
+
+  memcpy (&buf[X()] - 1, p, 6);  /* { dg-warning "writing" } */
+
+  memcpy (&buf[X()] - 5, p, 6);  /* { dg-warning "writing" } */
+
+  memcpy (&buf[X()] + X(), p, 5);
+
+  memcpy (&buf[X()] + X(), p, 6);  /* { dg-warning "writing" } */
+
+  memcpy (&buf[X()] - X(), p, 5);
+
+  memcpy (&buf[X()] - X(), p, 6);  /* { dg-warning "writing" } */
+}
+
+static inline size_t
+range (size_t min, size_t max)
+{
+  const size_t val = random_value ();
+  return val < min || max < val ? min : val;
+}
+
+/* Test memcpy with a variable offset not known to be in any range
+   and with a number of bytes bounded by a known range.  */
+
+void test_var_range (void *dst, const void *p)
+{
+  char buf[5];
+
+  memcpy (&buf[X()], p, range (0, 5));
+  memcpy (&buf[X()], p, range (1, 5));
+  memcpy (&buf[X()], p, range (2, 5));
+  memcpy (&buf[X()], p, range (3, 5));
+  memcpy (&buf[X()], p, range (4, 5));
+
+  memcpy (&buf[X()], p, range (6, 7));  /* { dg-warning "writing" } */
+
+  const size_t max = SIZE_MAX;
+  memcpy (&buf[X()], p, range (max - 1, max));  /* { dg-warning "specified size between \[0-9\]+ and \[0-9\]+ exceeds maximum object size" } */
+
+  memcpy (dst, p, range (max / 2 + 1, max));  /* { dg-warning "specified size between \[0-9\]+ and \[0-9\]+ exceeds maximum object size" } */
+}
+
+/* Return an integer in the range [MIN, MASK].  Use bitwise operations
+   rather than inequality to avoid relying on optimization passes
+   beyond Early Value Range Propagation that __builtin_object_size
+   doesn't make use of (yet).  */
+
+static inline size_t
+simple_range (unsigned min, unsigned mask)
+{
+  return ((unsigned)random_value () & mask) | (min & mask);
+}
+
+/* For brevity. */
+#define R(min, max) simple_range (min, max)
+
+void test_range_auto (const void *p)
+{
+  char buf[5];
+
+  memcpy (buf + R (0, 1), p, 6);  /* { dg-warning "writing" } */
+
+  /* Some of these could be diagnosed as an extension because they would
+     overflow when (if) the non-const index were sufficiently large.
+     The challenge is distinguishing a range where a variable is likely
+     to exceed the minimum required for the overflow to occur from one
+     where it isn't so likely.  */
+  memcpy (buf + R (0, 1), p, 5);
+
+  memcpy (buf + R (0, 1), p, 4);
+
+  memcpy (buf + R (0, 2), p, 3);
+
+  memcpy (buf + R (0, 3), p, 2);
+
+  memcpy (buf + R (0, 7), p, 1);
+
+  memcpy (buf + R (0, 5), p, 0);
+
+  /* The offset is known to be at least 1 so the size of the object
+     is at most 4.  */
+  memcpy (buf + R (1, 2), p, 3);
+
+  memcpy (buf + R (1, 3), p, 3);
+
+  memcpy (buf + R (1, 2), p, 4);
+
+  memcpy (buf + R (1, 3) + 1, p, 4);  /* { dg-warning "writing" } */
+
+  memcpy (buf + R (1, 3), p, 5);  /* { dg-warning "writing" } */
+
+  memcpy (buf + R (2, 3), p, 2);
+
+  memcpy (buf + R (2, 3), p, 3);
+
+  memcpy (buf + R (2, 3) + 1, p, 3);  /* { dg-warning "writing" } */
+
+  memcpy (buf + R (2, 3), p, 4);  /* { dg-warning "writing" } */
+
+  memcpy (buf + R (3, 4), p, 2);
+
+  memcpy (buf + R (3, 7), p, 3);  /* { dg-warning "writing" } */
+
+  memcpy (buf + R (3, 7), p, 9);  /* { dg-warning "writing" } */
+
+  memcpy (&buf[R (0, 1)] + 1, p, 4);
+
+  memcpy (&buf[R (0, 2)] + 1, p, 4);
+
+  memcpy (&buf[R (0, 3)] + 1, p, 4);
+
+  memcpy (&buf[R (0, 4)] + 1, p, 4);
+
+  memcpy (&buf[R (0, 5)] + 1, p, 4);
+
+  memcpy (&buf[R (0, 2)] + 2, p, 4);  /* { dg-warning "writing" } */
+
+  memcpy (&buf[R (0, 2)] - 2, p, 3);
+  memcpy (&buf[R (0, 2)] - 2, p, 3);
+
+  memcpy (&buf[R (0, 2)] - 2, p, 3);
+
+  memcpy (&buf[R (5, 15)], p, 1);  /* { dg-warning "writing" } */
+
+  /* With the offset given by the two ranges below there is at most
+     1 byte left.  */
+  memcpy (buf + R (1, 2) + R (3, 4), p, 1);
+
+  memcpy (buf + R (1, 3) + R (3, 7), p, 2);   /* { dg-warning "writing" } */
+
+  /* Unfortunately, the following isn't handled quite right: only
+     the lower bound given by the first range is used, the second
+     one is disregarded.  */
+  memcpy (&buf [R (1, 2)] + R (3, 4), p, 2);   /* { dg-warning "writing" "index in range plus offset in range" { xfail *-*-* } } */
+
+  memcpy (buf + R (2, 3) + R (2, 3), p, 1);
+
+  memcpy (buf + R (2, 3) + R (2, 3), p, 2);   /* { dg-warning "writing" } */
+}
+
+void test_range_malloc (const void *p)
+{
+  char *buf = __builtin_malloc (5);
+
+  memcpy (buf + R (0, 1), p, 6);  /* { dg-warning "writing" } */
+
+  memcpy (buf + R (0, 1), p, 5);
+
+  memcpy (buf + R (0, 1), p, 4);
+
+  memcpy (buf + R (0, 2), p, 3);
+
+  memcpy (buf + R (0, 3), p, 2);
+
+  memcpy (buf + R (0, 4), p, 1);
+
+  memcpy (buf + R (0, 5), p, 0);
+}
+
+void test_range_schar (signed char i, const void *s)
+{
+  char a [130];
+
+  /* The range of I is [-128, 127] so the size of the destination below
+     is at most 2 (i.e., 130 - 128) bytes. */
+  memcpy (&a [130] + i, s, 2);
+
+  /* Strictly, the range of I below is [0, 127] because a negative value
+     would result in forming an invalid pointer, so the destination is at
+     most 2 bytes. */
+  memcpy (&a [i] + 128, s, 2);
+
+  /* Reset I's range just in case it gets set above.  */
+  i = random_value ();
+  memcpy (&a [130] + i, s, 3);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [i] + 128, s, 3);   /* { dg-warning "writing" } */
+}
+
+void test_range_uchar (unsigned char i, const void *s)
+{
+  char a [260];
+
+  /* The range of I is [0, 255] so the size of the destination below
+     is at most 2 (i.e., 260 - 258 + 0) bytes. */
+  memcpy (&a [258] + i, s, 2);
+
+  memcpy (&a [i] + 128, s, 2);
+
+  /* Reset I's range just in case it gets set above.  */
+  i = random_value ();
+  memcpy (&a [258] + i, s, 3);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [i] + 258, s, 3);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [259] + i, s, 2);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [i] + 259, s, 2);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [260] + i, s, 1);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [i] + 260, s, 1);   /* { dg-warning "writing" } */
+
+  i = random_value ();
+  memcpy (&a [260] + i, s, 0);
+
+  i = random_value ();
+  memcpy (&a [i] + 260, s, 0);
+}
+
+void test_range_int (int i, const void *s)
+{
+  const size_t max = (size_t)INT_MAX * 2 + 1;
+
+  char *a = __builtin_malloc (max);
+
+  memcpy (&a [max] + i, s, INT_MAX);
+  memcpy (&a [max] - i, s, INT_MAX);
+  /* &*/
+  memcpy (&a [max] + i, s, INT_MAX + (size_t)1);
+  memcpy (&a [max] - i, s, INT_MAX + (size_t)1); /* { dg-warning "writing" } */
+  memcpy (&a [max] + i, s, INT_MAX + (size_t)2); /* { dg-warning "writing" } */
+  memcpy (&a [max] - i, s, INT_MAX + (size_t)2); /* { dg-warning "writing" } */
+
+  char *end = &a [max];
+  memcpy (end + i, s, INT_MAX);
+  memcpy (end - i, s, INT_MAX);
+  /* &*/
+  memcpy (end + i, s, INT_MAX + (size_t)1);
+  memcpy (end - i, s, INT_MAX + (size_t)1); /* { dg-warning "writing" } */
+  memcpy (end + i, s, INT_MAX + (size_t)2); /* { dg-warning "writing" } */
+  memcpy (end - i, s, INT_MAX + (size_t)2); /* { dg-warning "writing" } */
+
+  memcpy (&a [i] + max, s, 1);
+}
+
+
+void test_range_ptrdiff_t (ptrdiff_t i, const void *s)
+{
+  const size_t max = PTRDIFF_MAX;
+
+  char *a = __builtin_malloc (max);
+
+  memcpy (&a [max] + i, s, max);
+
+  memcpy (&a [i] + max - 1, s, 1);
+}
+
+void test_range_size_t (size_t i, const void *s)
+{
+  const size_t diffmax = PTRDIFF_MAX;
+
+  char *a = __builtin_malloc (diffmax);
+
+  memcpy (&a [diffmax] + i, s, 0);
+  memcpy (&a [i] + diffmax, s, 0);
+
+  memcpy (&a [diffmax] + i, s, 1);  /* { dg-warning "writing" } */
+
+  memcpy (&a [i] + diffmax, s, 1);  /* { dg-warning "writing" } */
+}
+
+struct S {
+  int i;
+  char a7[7];
+  int b;
+};
+
+void test_range_member_array (struct S *s, const void *p)
+{
+  memcpy (s->a7 + R (0, 1), p, 6);
+
+  memcpy (s->a7 + R (0, 1), p, 7);
+
+  memcpy (s->a7 + R (0, 1), p, 8);  /* { dg-warning "writing" } */
+
+  memcpy (&s->a7 [R (0, 1)], p, 7);
+
+  memcpy (&s->a7 [R (1, 3)], p, 7);  /* { dg-warning "writing" } */
+}
diff --git a/gcc/testsuite/gcc.dg/pr77608.c b/gcc/testsuite/gcc.dg/pr77608.c
new file mode 100644
index 0000000..04f831a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr77608.c
@@ -0,0 +1,19 @@ 
+/* { dg-do compile } */
+/* { dg-options "-O -Wall -fdump-tree-optimized -ftrack-macro-expansion=0" } */
+
+#define bos(dest) __builtin_object_size (dest, 0)
+#define memcpy(dest, src, n)				\
+  __builtin___memcpy_chk (dest, src, n, bos (dest))
+
+void f (void*);
+
+void g (int i)
+{
+  char d [3];
+
+  memcpy (d + i, "abcdef", 5);   /* { dg-warning "writing" } */
+
+  f (d);
+}
+
+/* { dg-final { scan-tree-dump-times "__builtin___memcpy_chk" 1 "optimized" } } */
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 1317ad7..5e84448 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -33,6 +33,8 @@  along with GCC; see the file COPYING3.  If not see
 #include "gimple-iterator.h"
 #include "tree-cfg.h"
 
+#include "diagnostic-core.h"
+
 struct object_size_info
 {
   int object_size_type;
@@ -52,11 +54,12 @@  static const unsigned HOST_WIDE_INT unknown[4] = {
 
 static tree compute_object_offset (const_tree, const_tree);
 static bool addr_object_size (struct object_size_info *,
-			      const_tree, int, unsigned HOST_WIDE_INT *);
+			      const_tree, int, unsigned HOST_WIDE_INT *,
+			      bool * = NULL);
 static unsigned HOST_WIDE_INT alloc_object_size (const gcall *, int);
 static tree pass_through_call (const gcall *);
-static void collect_object_sizes_for (struct object_size_info *, tree);
-static void expr_object_size (struct object_size_info *, tree, tree);
+static bool collect_object_sizes_for (struct object_size_info *, tree);
+static bool expr_object_size (struct object_size_info *, tree, tree);
 static bool merge_object_sizes (struct object_size_info *, tree, tree,
 				unsigned HOST_WIDE_INT);
 static bool plus_stmt_object_size (struct object_size_info *, tree, gimple *);
@@ -74,9 +77,18 @@  static void check_for_plus_in_loops_1 (struct object_size_info *, tree,
    the object and object_sizes[3] lower bound for subobject.  */
 static vec<unsigned HOST_WIDE_INT> object_sizes[4];
 
-/* Bitmaps what object sizes have been computed already.  */
+/* Bitmaps of variables whose object sizes have already been computed.  */
 static bitmap computed[4];
 
+/* Bitmaps of variables whose object sizes have been computed without
+   regard to the (non-constant) offset into them.  A bit in each bitmap
+   is valid only if the corresponding bit in the COMPUTED bitmap is
+   non-zero (otherwise it's zero).
+   FIXME: Change this to a vector of offset ranges to make it possible
+   to handle cases like &A[I] + J where both I and J are non-const and
+   bounded by some range.  */
+static bitmap nonconst_offsets[4];
+
 /* Maximum value of offset we consider to be addition.  */
 static unsigned HOST_WIDE_INT offset_limit;
 
@@ -158,14 +170,155 @@  compute_object_offset (const_tree expr, const_tree var)
   return size_binop (code, base, off);
 }
 
+/* Return true if the type of operand OP, or, when OP is an SSA_NAME
+   defined by a conversion, the type of the converted operand, is
+   unsigned.  */
+
+static bool
+operand_unsigned_p (tree op)
+{
+  if (TREE_CODE (op) == SSA_NAME)
+    {
+      gimple *def = SSA_NAME_DEF_STMT (op);
+      if (is_gimple_assign (def))
+	{
+	  tree_code code = gimple_assign_rhs_code (def);
+	  if (code == NOP_EXPR)
+	      op = gimple_assign_rhs1 (def);
+	}
+    }
+
+  return TYPE_UNSIGNED (TREE_TYPE (op));
+}
+
+/* Fill the 2-element OFFRANGE array with the range of values OFF
+   is known to be in and return true, or return false if the range
+   could not be determined.  Postcondition: OFFRANGE[0] <= OFFRANGE[1].  */
+
+static bool
+get_offset_range (tree off, HOST_WIDE_INT offrange[2])
+{
+  if (TREE_CODE (off) == NOP_EXPR)
+    return get_offset_range (TREE_OPERAND (off, 0), offrange);
+
+  if (TREE_CODE (off) == SSA_NAME)
+    {
+      wide_int min, max;
+      enum value_range_type range_type = get_range_info (off, &min, &max);
+
+      if (range_type == VR_RANGE)
+	{
+	  /* Interpret the range in the variable's type.  */
+	  tree tmin = wide_int_to_tree (TREE_TYPE (off), min);
+	  tree tmax = wide_int_to_tree (TREE_TYPE (off), max);
+
+	  offrange[0] = (tree_fits_shwi_p (tmin)
+			 ? tree_to_shwi (tmin) : tree_to_uhwi (tmin));
+	  offrange[1] = (tree_fits_shwi_p (tmax)
+			  ? tree_to_shwi (tmax) : tree_to_uhwi (tmax));
+
+	  gimple *def = SSA_NAME_DEF_STMT (off);
+	  if (is_gimple_assign (def))
+	    {
+	      tree_code code = gimple_assign_rhs_code (def);
+	      if (code == PLUS_EXPR)
+		{
+		  /* Handle offset in the form VAR + CST where VAR's type
+		     is unsigned so the offset must be the greater of
+		     OFFRANGE[0] and CST.  This assumes the PLUS_EXPR
+		     is in a canonical form with CST second.  */
+		  tree rhs2 = gimple_assign_rhs2 (def);
+		  if (TREE_CODE (rhs2) == INTEGER_CST)
+		    {
+		      tree rhs1 = gimple_assign_rhs1 (def);
+
+		      HOST_WIDE_INT minoff
+			= (tree_fits_shwi_p (rhs2)
+			   ? tree_to_shwi (rhs2) : tree_to_uhwi (rhs2));
+
+		      if (offrange[0] < minoff && operand_unsigned_p (rhs1))
+			offrange[0] = minoff;
+		    }
+		}
+	    }
+	  return true;
+	}
+      else if (range_type == VR_ANTI_RANGE)
+	{
+	  if (wi::fits_uhwi_p (min) && wi::fits_uhwi_p (max))
+	    {
+	      offrange[0] = max.to_uhwi () + 1;
+	      offrange[1] = min.to_uhwi () - 1;
+	      return true;
+	    }
+	  return false;
+	}
+      else if (range_type == VR_VARYING)
+	{
+	  gimple *def = SSA_NAME_DEF_STMT (off);
+	  if (is_gimple_assign (def))
+	    {
+	      /* The range information available in this pass tends
+		 to be very limited (currently only EVRP).  Try to
+		 limit the range based on the type of the expression
+		 before it has been converted to sizetype.  */
+	      tree_code code = gimple_assign_rhs_code (def);
+	      if (code == NOP_EXPR)
+		{
+		  off = gimple_assign_rhs1 (def);
+		  if (offrange[0] != offrange[1])
+		    {
+		      tree type = TREE_TYPE (off);
+		      tree tmin = TYPE_MIN_VALUE (type);
+		      tree tmax = TYPE_MAX_VALUE (type);
+		      offrange[0]
+			= (tree_fits_shwi_p (tmin)
+			   ? tree_to_shwi (tmin) : tree_to_uhwi (tmin));
+		      offrange[1]
+			= (tree_fits_shwi_p (tmax)
+			   ? tree_to_shwi (tmax) : tree_to_uhwi (tmax));
+		    }
+
+		  return true;
+		}
+	    }
+
+	  /* For types smaller than size_t determine their range.  */
+	  tree type = TREE_TYPE (off);
+	  if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
+	    {
+	      tree tmin = TYPE_MIN_VALUE (type);
+	      tree tmax = TYPE_MAX_VALUE (type);
+	      offrange[0]
+		= (tree_fits_shwi_p (tmin)
+		   ? tree_to_shwi (tmin) : tree_to_uhwi (tmin));
+	      offrange[1]
+		= (tree_fits_shwi_p (tmax)
+		   ? tree_to_shwi (tmax) : tree_to_uhwi (tmax));
+
+	      return true;
+	    }
+	}
+    }
+  else if (TREE_CODE (off) == INTEGER_CST && tree_fits_uhwi_p (off))
+    {
+      offrange[0] = offrange[1] = tree_to_uhwi (off);
+      return true;
+    }
+
+  return false;
+}
 
 /* Compute __builtin_object_size for PTR, which is a ADDR_EXPR.
    OBJECT_SIZE_TYPE is the second argument from __builtin_object_size.
-   If unknown, return unknown[object_size_type].  */
+   If known set *PSIZE to it, otherwise set it to unknown[object_size_type].
+   When PIGNORE_OFFSET is non-null and *PIGNORE_OFFSET is true, ignore
+   the offset from PTR in the computation of the size.  When the ADDR_EXPR
+   is determined to involve a non-constant offset, set *PIGNORE_OFFSET to
+   true before returning.  This allows callers to also ignore constant
+   offsets (such as the '3' in &p->a[x] + 3 when x is not constant).  */
 
 static bool
 addr_object_size (struct object_size_info *osi, const_tree ptr,
-		  int object_size_type, unsigned HOST_WIDE_INT *psize)
+		  int object_size_type, unsigned HOST_WIDE_INT *psize,
+		  bool *pignore_offset)
 {
   tree pt_var, pt_var_size = NULL_TREE, var_size, bytes;
 
@@ -346,15 +499,42 @@  addr_object_size (struct object_size_info *osi, const_tree ptr,
 	return false;
       else
 	var_size = pt_var_size;
-      bytes = compute_object_offset (TREE_OPERAND (ptr, 0), var);
+
+      /* Compute the offset into the object given by the operand and
+	 subtract it from the object size unless *PIGNORE_OFFSET is set.  */
+      bytes = (pignore_offset && *pignore_offset
+	       ? size_zero_node
+	       : compute_object_offset (TREE_OPERAND (ptr, 0), var));
+
       if (bytes != error_mark_node)
 	{
-	  if (TREE_CODE (bytes) == INTEGER_CST
-	      && tree_int_cst_lt (var_size, bytes))
-	    bytes = size_zero_node;
+	  HOST_WIDE_INT offrange[2];
+
+	  if (get_offset_range (bytes, offrange))
+	    {
+	      if (pignore_offset)
+		*pignore_offset = offrange[0] != offrange[1];
+
+	      if (offrange[0] < 0 && offrange[1] < 0)
+		bytes = size_zero_node;
+	      else
+		{
+		  unsigned HOST_WIDE_INT minoff
+		    = (offrange[0] < 0 ? 0 : offrange[0]);
+		  unsigned HOST_WIDE_INT vszi = tree_to_shwi (var_size);
+
+		  if (minoff < vszi)
+		    bytes = size_binop (MINUS_EXPR, var_size,
+					build_int_cst (TREE_TYPE (var_size),
+						       minoff));
+		  else
+		    bytes = size_zero_node;
+		}
+	    }
 	  else
 	    bytes = size_binop (MINUS_EXPR, var_size, bytes);
 	}
+
       if (var != pt_var
 	  && pt_var_size
 	  && TREE_CODE (pt_var) == MEM_REF
@@ -379,10 +559,24 @@  addr_object_size (struct object_size_info *osi, const_tree ptr,
 
   if (tree_fits_uhwi_p (bytes))
     {
+      /* When the size of the object is known exactly (including any
+	 constant offset) set *PSIZE to it.  */
       *psize = tree_to_uhwi (bytes);
       return true;
     }
 
+  if (tree_fits_uhwi_p (var_size) && !integer_zerop (var_size))
+    {
+      /* When the size of the object is known and non-zero but
+	 the offset into it isn't, set *PSIZE to the size, ignore
+	 the non-constant offset, and set *PIGNORE_OFFSET to true
+	 to indicate this to the caller.  */
+      *psize = tree_to_uhwi (var_size);
+      if (pignore_offset)
+	*pignore_offset = true;
+      return true;
+    }
+
   return false;
 }
 
@@ -496,9 +690,10 @@  pass_through_call (const gcall *call)
    to __builtin_object_size.  Return true on success and false
    when the object size could not be determined.  */
 
-bool
-compute_builtin_object_size (tree ptr, int object_size_type,
-			     unsigned HOST_WIDE_INT *psize)
+static bool
+internal_builtin_object_size (object_size_info *posi,
+			      tree ptr, int object_size_type,
+			      unsigned HOST_WIDE_INT *psize)
 {
   gcc_assert (object_size_type >= 0 && object_size_type <= 3);
 
@@ -552,8 +747,14 @@  compute_builtin_object_size (tree ptr, int object_size_type,
       bitmap_iterator bi;
       unsigned int i;
 
-      if (num_ssa_names > object_sizes[object_size_type].length ())
-	object_sizes[object_size_type].safe_grow (num_ssa_names);
+      unsigned count = object_sizes[object_size_type].length ();
+      if (num_ssa_names > count)
+	{
+	  /* Increase size and initialize added elements to unknown.  */
+	  object_sizes[object_size_type].safe_grow (num_ssa_names);
+	  for (i = count; i < num_ssa_names; ++i)
+	    object_sizes[object_size_type][i] = unknown[object_size_type];
+	}
       if (dump_file)
 	{
 	  fprintf (dump_file, "Computing %s %sobject size for ",
@@ -563,25 +764,31 @@  compute_builtin_object_size (tree ptr, int object_size_type,
 	  fprintf (dump_file, ":\n");
 	}
 
-      osi.visited = BITMAP_ALLOC (NULL);
-      osi.reexamine = BITMAP_ALLOC (NULL);
-      osi.object_size_type = object_size_type;
-      osi.depths = NULL;
-      osi.stack = NULL;
-      osi.tos = NULL;
-
-      /* First pass: walk UD chains, compute object sizes that
-	 can be computed.  osi.reexamine bitmap at the end will
-	 contain what variables were found in dependency cycles
-	 and therefore need to be reexamined.  */
-      osi.pass = 0;
-      osi.changed = false;
-      collect_object_sizes_for (&osi, ptr);
+      if (!posi)
+	{
+	  osi.visited = BITMAP_ALLOC (NULL);
+	  osi.reexamine = BITMAP_ALLOC (NULL);
+	  osi.object_size_type = object_size_type;
+	  osi.depths = NULL;
+	  osi.stack = NULL;
+	  osi.tos = NULL;
+
+	  /* First pass: walk UD chains, compute object sizes that
+	     can be computed.  osi.reexamine bitmap at the end will
+	     contain what variables were found in dependency cycles
+	     and therefore need to be reexamined.  */
+	  osi.pass = 0;
+	  osi.changed = false;
+
+	  posi = &osi;
+	}
+
+      collect_object_sizes_for (posi, ptr);
 
       /* Second pass: keep recomputing object sizes of variables
 	 that need reexamination, until no object sizes are
 	 increased or all object sizes are computed.  */
-      if (! bitmap_empty_p (osi.reexamine))
+      if (! bitmap_empty_p (posi->reexamine))
 	{
 	  bitmap reexamine = BITMAP_ALLOC (NULL);
 
@@ -593,35 +800,35 @@  compute_builtin_object_size (tree ptr, int object_size_type,
 	     E.g. p = &buf[0]; while (cond) p = p + 4;  */
 	  if (object_size_type & 2)
 	    {
-	      osi.depths = XCNEWVEC (unsigned int, num_ssa_names);
-	      osi.stack = XNEWVEC (unsigned int, num_ssa_names);
-	      osi.tos = osi.stack;
-	      osi.pass = 1;
+	      posi->depths = XCNEWVEC (unsigned int, num_ssa_names);
+	      posi->stack = XNEWVEC (unsigned int, num_ssa_names);
+	      posi->tos = posi->stack;
+	      posi->pass = 1;
 	      /* collect_object_sizes_for is changing
-		 osi.reexamine bitmap, so iterate over a copy.  */
-	      bitmap_copy (reexamine, osi.reexamine);
+		 posi->reexamine bitmap, so iterate over a copy.  */
+	      bitmap_copy (reexamine, posi->reexamine);
 	      EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
-		if (bitmap_bit_p (osi.reexamine, i))
-		  check_for_plus_in_loops (&osi, ssa_name (i));
-
-	      free (osi.depths);
-	      osi.depths = NULL;
-	      free (osi.stack);
-	      osi.stack = NULL;
-	      osi.tos = NULL;
+		if (bitmap_bit_p (posi->reexamine, i))
+		  check_for_plus_in_loops (posi, ssa_name (i));
+
+	      free (posi->depths);
+	      posi->depths = NULL;
+	      free (posi->stack);
+	      posi->stack = NULL;
+	      posi->tos = NULL;
 	    }
 
 	  do
 	    {
-	      osi.pass = 2;
-	      osi.changed = false;
+	      posi->pass = 2;
+	      posi->changed = false;
 	      /* collect_object_sizes_for is changing
-		 osi.reexamine bitmap, so iterate over a copy.  */
-	      bitmap_copy (reexamine, osi.reexamine);
+		 posi->reexamine bitmap, so iterate over a copy.  */
+	      bitmap_copy (reexamine, posi->reexamine);
 	      EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
-		if (bitmap_bit_p (osi.reexamine, i))
+		if (bitmap_bit_p (posi->reexamine, i))
 		  {
-		    collect_object_sizes_for (&osi, ssa_name (i));
+		    collect_object_sizes_for (posi, ssa_name (i));
 		    if (dump_file && (dump_flags & TDF_DETAILS))
 		      {
 			fprintf (dump_file, "Reexamining ");
@@ -631,17 +838,17 @@  compute_builtin_object_size (tree ptr, int object_size_type,
 		      }
 		  }
 	    }
-	  while (osi.changed);
+	  while (posi->changed);
 
 	  BITMAP_FREE (reexamine);
 	}
-      EXECUTE_IF_SET_IN_BITMAP (osi.reexamine, 0, i, bi)
+      EXECUTE_IF_SET_IN_BITMAP (posi->reexamine, 0, i, bi)
 	bitmap_set_bit (computed[object_size_type], i);
 
       /* Debugging dumps.  */
       if (dump_file)
 	{
-	  EXECUTE_IF_SET_IN_BITMAP (osi.visited, 0, i, bi)
+	  EXECUTE_IF_SET_IN_BITMAP (posi->visited, 0, i, bi)
 	    if (object_sizes[object_size_type][i]
 		!= unknown[object_size_type])
 	      {
@@ -656,17 +863,29 @@  compute_builtin_object_size (tree ptr, int object_size_type,
 	      }
 	}
 
-      BITMAP_FREE (osi.reexamine);
-      BITMAP_FREE (osi.visited);
+      if (posi == &osi)
+	{
+	  BITMAP_FREE (posi->reexamine);
+	  BITMAP_FREE (posi->visited);
+	}
     }
 
   *psize = object_sizes[object_size_type][SSA_NAME_VERSION (ptr)];
   return *psize != unknown[object_size_type];
 }
 
-/* Compute object_sizes for PTR, defined to VALUE, which is not an SSA_NAME.  */
+bool
+compute_builtin_object_size (tree ptr, int object_size_type,
+			     unsigned HOST_WIDE_INT *psize)
+{
+  return internal_builtin_object_size (NULL, ptr, object_size_type, psize);
+}
+
+/* Compute object_sizes for PTR, defined to VALUE, which is not an SSA_NAME.
+   Return true if the expression involved a non-constant offset from PTR
+   that has been (unavoidably) ignored, and false otherwise.  */
 
-static void
+static bool
 expr_object_size (struct object_size_info *osi, tree ptr, tree value)
 {
   int object_size_type = osi->object_size_type;
@@ -684,8 +903,9 @@  expr_object_size (struct object_size_info *osi, tree ptr, tree value)
   gcc_assert (TREE_CODE (value) != SSA_NAME
 	      || !POINTER_TYPE_P (TREE_TYPE (value)));
 
+  bool ignore_offset = false;
   if (TREE_CODE (value) == ADDR_EXPR)
-    addr_object_size (osi, value, object_size_type, &bytes);
+    addr_object_size (osi, value, object_size_type, &bytes, &ignore_offset);
   else
     bytes = unknown[object_size_type];
 
@@ -699,6 +919,8 @@  expr_object_size (struct object_size_info *osi, tree ptr, tree value)
       if (object_sizes[object_size_type][varno] > bytes)
 	object_sizes[object_size_type][varno] = bytes;
     }
+
+  return ignore_offset;
 }
 
 
@@ -769,21 +991,27 @@  merge_object_sizes (struct object_size_info *osi, tree dest, tree orig,
 {
   int object_size_type = osi->object_size_type;
   unsigned int varno = SSA_NAME_VERSION (dest);
-  unsigned HOST_WIDE_INT orig_bytes;
 
   if (object_sizes[object_size_type][varno] == unknown[object_size_type])
     return false;
-  if (offset >= offset_limit)
+
+  /* IGNORE_OFFSET will be set to true if the size of the object
+     does not reflect a non-constant offset into it.  */
+  bool ignore_offset = false;
+  if (osi->pass == 0)
+    ignore_offset = collect_object_sizes_for (osi, orig);
+
+  if (offset >= offset_limit && !ignore_offset)
     {
       object_sizes[object_size_type][varno] = unknown[object_size_type];
       return false;
     }
 
-  if (osi->pass == 0)
-    collect_object_sizes_for (osi, orig);
+  unsigned HOST_WIDE_INT orig_bytes
+    = object_sizes[object_size_type][SSA_NAME_VERSION (orig)];
 
-  orig_bytes = object_sizes[object_size_type][SSA_NAME_VERSION (orig)];
-  if (orig_bytes != unknown[object_size_type])
+  if (orig_bytes != unknown[object_size_type]
+      && (!ignore_offset || offset < HOST_WIDE_INT_MAX))
     orig_bytes = (offset > orig_bytes)
 		 ? HOST_WIDE_INT_0U : orig_bytes - offset;
 
@@ -806,7 +1034,6 @@  merge_object_sizes (struct object_size_info *osi, tree dest, tree orig,
   return bitmap_bit_p (osi->reexamine, SSA_NAME_VERSION (orig));
 }
 
-
 /* Compute object_sizes for VAR, defined to the result of an assignment
    with operator POINTER_PLUS_EXPR.  Return true if the object size might
    need reexamination  later.  */
@@ -819,6 +1046,9 @@  plus_stmt_object_size (struct object_size_info *osi, tree var, gimple *stmt)
   unsigned HOST_WIDE_INT bytes;
   tree op0, op1;
 
+  if (object_sizes[object_size_type][varno] == unknown[object_size_type])
+    return false;
+
   if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
     {
       op0 = gimple_assign_rhs1 (stmt);
@@ -834,36 +1064,87 @@  plus_stmt_object_size (struct object_size_info *osi, tree var, gimple *stmt)
   else
     gcc_unreachable ();
 
-  if (object_sizes[object_size_type][varno] == unknown[object_size_type])
-    return false;
+  /* If the offset is not constant, assume it's either zero for the maximum
+     object size or SIZE_MAX for the minimum size of zero.  */
+  HOST_WIDE_INT offrange[2] = { -HOST_WIDE_INT_MAX - 1, HOST_WIDE_INT_MAX };
+  get_offset_range (op1, offrange);
+
+  if (TREE_CODE (op0) == SSA_NAME && offrange[0] == offrange[1])
+    return merge_object_sizes (osi, var, op0, offrange[0]);
 
-  /* Handle PTR + OFFSET here.  */
-  if (TREE_CODE (op1) == INTEGER_CST
-      && (TREE_CODE (op0) == SSA_NAME
-	  || TREE_CODE (op0) == ADDR_EXPR))
+  if (TREE_CODE (op0) == ADDR_EXPR)
+    addr_object_size (osi, op0, object_size_type, &bytes);
+  else if (!internal_builtin_object_size (osi, op0, object_size_type, &bytes))
+    goto set_size;
+
+  if (offrange[0] == offrange[1])
     {
-      if (! tree_fits_uhwi_p (op1))
+      /* Compute the size of the object referenced by &VAR + OP1
+	 with a constant OP1.  */
+      unsigned HOST_WIDE_INT off = offrange[0];
+
+      if (off > offset_limit)
 	bytes = unknown[object_size_type];
-      else if (TREE_CODE (op0) == SSA_NAME)
-	return merge_object_sizes (osi, var, op0, tree_to_uhwi (op1));
+      else if (off > bytes)
+	bytes = 0;
+      else if (bytes != unknown[object_size_type])
+	bytes -= off;
+    }
+  else
+    {
+      /* Compute the size of the largest object referenced by
+	 &VAR + OP1 with OP1's range OFFRANGE.  */
+      unsigned HOST_WIDE_INT maxbytes = bytes;
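+      /* If OP0 is defined as the address of an object look through
+	 the SSA_NAME so the size of the whole object (with the offset
+	 disregarded) can be recomputed by addr_object_size below.  */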
+      if (TREE_CODE (op0) == SSA_NAME)
+	{
+	  gimple *def = SSA_NAME_DEF_STMT (op0);
+	  if (gimple_code (def) == GIMPLE_ASSIGN)
+	    {
+	      tree_code code = gimple_assign_rhs_code (def);
+	      if (code == ADDR_EXPR)
+		op0 = gimple_assign_rhs1 (def);
+	    }
+	}
+      if (TREE_CODE (op0) == ADDR_EXPR)
+	{
+	  bool ignore_offset = true;
+	  addr_object_size (osi, op0, object_size_type, &maxbytes,
+			    &ignore_offset);
+	}
+
+      if (bytes == maxbytes)
+	{
+	  /* If the lower bound of the range is positive adjust the size
+	     of the object down to reflect the size of the remainder of
+	     the object.  */
+	  if (0 < offrange[0])
+	    bytes = ((unsigned HOST_WIDE_INT) offrange[0] < bytes
+		     ? bytes - offrange[0] : 0);
+	}
       else
 	{
-	  unsigned HOST_WIDE_INT off = tree_to_uhwi (op1);
-
-          /* op0 will be ADDR_EXPR here.  */
-	  addr_object_size (osi, op0, object_size_type, &bytes);
-	  if (bytes == unknown[object_size_type])
-	    ;
-	  else if (off > offset_limit)
-	    bytes = unknown[object_size_type];
-	  else if (off > bytes)
-	    bytes = 0;
+	  unsigned HOST_WIDE_INT firstoff = maxbytes - bytes;
+
+	  /* Compute the largest object size, accounting for
+	     the non-constant offset.  */
+	  if (offrange[0] < 0)
+	    {
+	      if (firstoff <= (unsigned HOST_WIDE_INT) -offrange[0])
+		bytes = maxbytes;
+	      else
+		bytes = maxbytes + offrange[0];
+	    }
 	  else
-	    bytes -= off;
+	    {
+	      if (firstoff + offrange[0] < maxbytes)
+		bytes = maxbytes - (firstoff + offrange[0]);
+	      else
+		bytes = 0;
+	    }
 	}
     }
-  else
-    bytes = unknown[object_size_type];
+
+ set_size:
 
   if ((object_size_type & 2) == 0)
     {
@@ -878,7 +1159,6 @@  plus_stmt_object_size (struct object_size_info *osi, tree var, gimple *stmt)
   return false;
 }
 
-
 /* Compute object_sizes for VAR, defined at STMT, which is
    a COND_EXPR.  Return true if the object size might need reexamination
    later.  */
@@ -932,7 +1212,7 @@  cond_expr_object_size (struct object_size_info *osi, tree var, gimple *stmt)
    Otherwise, object size is the maximum of object sizes of variables
    that it might be set to.  */
 
-static void
+static bool
 collect_object_sizes_for (struct object_size_info *osi, tree var)
 {
   int object_size_type = osi->object_size_type;
@@ -940,13 +1220,19 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
   gimple *stmt;
   bool reexamine;
 
+  /* If the size for the variable has already been computed avoid
+     recomputing it and return true if its computed size involved
+     ignoring a non-constant offset into it and false otherwise.  */
   if (bitmap_bit_p (computed[object_size_type], varno))
-    return;
+    return bitmap_bit_p (nonconst_offsets[object_size_type], varno);
 
   if (osi->pass == 0)
     {
       if (bitmap_set_bit (osi->visited, varno))
 	{
+	  /* Initialize the size to the maximum or minimum, opposite
+	     of the size being computed.  The result will be adjusted
+	     down or up, respectively, as the actual size is determined.  */
 	  object_sizes[object_size_type][varno]
 	    = (object_size_type & 2) ? -1 : 0;
 	}
@@ -961,7 +1247,7 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
 	      print_generic_expr (dump_file, var, dump_flags);
 	      fprintf (dump_file, "\n");
 	    }
-	  return;
+	  return false;
 	}
     }
 
@@ -975,6 +1261,11 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
   stmt = SSA_NAME_DEF_STMT (var);
   reexamine = false;
 
+  /* Set to true when the reference to the object whose size is being
+     computed involves a non-constant offset that is not taken into
+     consideration.  */
+  bool ignore_offset = false;
+
   switch (gimple_code (stmt))
     {
     case GIMPLE_ASSIGN:
@@ -993,7 +1284,7 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
                 && POINTER_TYPE_P (TREE_TYPE (rhs)))
               reexamine = merge_object_sizes (osi, var, rhs, 0);
             else
-              expr_object_size (osi, var, rhs);
+              ignore_offset = expr_object_size (osi, var, rhs);
           }
         else
           unknown_object_size (osi, var);
@@ -1059,6 +1350,12 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
       || object_sizes[object_size_type][varno] == unknown[object_size_type])
     {
       bitmap_set_bit (computed[object_size_type], varno);
+
+      /* Make a record of a variable whose reference involves a non-constant
+	 offset (that is unavoidably ignored).  */
+      if (ignore_offset)
+	bitmap_set_bit (nonconst_offsets[object_size_type], varno);
+
       bitmap_clear_bit (osi->reexamine, varno);
     }
   else
@@ -1071,6 +1368,8 @@  collect_object_sizes_for (struct object_size_info *osi, tree var)
 	  fprintf (dump_file, "\n");
 	}
     }
+
+  return ignore_offset;
 }
 
 
@@ -1220,8 +1519,13 @@  init_object_sizes (void)
 
   for (object_size_type = 0; object_size_type <= 3; object_size_type++)
     {
+      /* Allocate and initialize all elements to unknown.  */
       object_sizes[object_size_type].safe_grow (num_ssa_names);
+      for (unsigned i = 0; i != num_ssa_names; ++i)
+	object_sizes[object_size_type][i] = unknown[object_size_type];
+
       computed[object_size_type] = BITMAP_ALLOC (NULL);
+      nonconst_offsets[object_size_type] = BITMAP_ALLOC (NULL);
     }
 
   init_offset_limit ();
@@ -1239,6 +1543,7 @@  fini_object_sizes (void)
     {
       object_sizes[object_size_type].release ();
       BITMAP_FREE (computed[object_size_type]);
+      BITMAP_FREE (nonconst_offsets[object_size_type]);
     }
 }