/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "params.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
#include "recog.h"
#include "diagnostic-core.h"

/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (VEC (gimple, heap) **, tree *,
					    tree *);
static gimple vect_recog_widen_mult_pattern (VEC (gimple, heap) **, tree *,
					     tree *);
static gimple vect_recog_dot_prod_pattern (VEC (gimple, heap) **, tree *,
					   tree *);
static gimple vect_recog_pow_pattern (VEC (gimple, heap) **, tree *, tree *);
static gimple vect_recog_over_widening_pattern (VEC (gimple, heap) **, tree *,
                                                 tree *);
static gimple vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **,
						  tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
	vect_recog_widen_mult_pattern,
	vect_recog_widen_sum_pattern,
	vect_recog_dot_prod_pattern,
	vect_recog_pow_pattern,
	vect_recog_over_widening_pattern,
	vect_recog_mixed_size_cond_pattern};


/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.
   If CHECK_SIGN is TRUE, check that either both types are signed or both are
   unsigned.  */

static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt,
		bool check_sign)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (! *def_stmt)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type)) && check_sign)
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
                           &dt))
    return false;

  return true;
}

/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */

static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}

/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
   same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S7, the pattern {S3,S4,S5,S6,S7}
   will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results. It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */

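/* Illustration only, a hedged sketch that is not part of this file: C source
   of the shape this pattern matches, modeled on the S1-S7 sequence above.
   The names N, X, Y and dot_prod_example are hypothetical, and the block is
   #if 0'd out so it cannot affect compilation.  */
#if 0
#define N 64
static signed char X[N], Y[N];

static int
dot_prod_example (void)
{
  int i;
  short prod;      /* TYPE1: exactly double the width of 'signed char'.  */
  int sum = 0;     /* TYPE2: at least as wide as TYPE1.  */

  for (i = 0; i < N; i++)
    {
      prod = X[i] * Y[i];   /* S3/S4 widen x_t and y_t; S5 multiplies.  */
      sum += prod;          /* S6 widens the product; S7 accumulates.  */
    }
  return sum;
}
#endif
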
static gimple
vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
			     tree *type_out)
{
  gimple stmt, last_stmt = VEC_index (gimple, *stmts, 0);
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size as DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), type)
	  || !types_compatible_p (TREE_TYPE (oprnd1), type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt, true))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */
  if (TREE_CODE (oprnd0) != SSA_NAME)
    return NULL;

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);

  /* It could not be the dot_prod pattern if the stmt is outside the loop.  */
  if (!gimple_bb (stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return NULL;

  /* FORNOW.  We cannot continue analyzing the def-use chain when this stmt
     is a phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
          || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt, true))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt, true))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (!types_compatible_p (half_type0, half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
	return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops3 (DOT_PROD_EXPR, var,
						oprnd00, oprnd01, oprnd1);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));

  return pattern_stmt;
}


/* Handle two cases of multiplication by a constant.  The first one is when
   the constant, CONST_OPRND, fits the type (HALF_TYPE) of the second
   operand (OPRND).  In that case, we can perform widen-mult from HALF_TYPE to
   TYPE.

   Otherwise, if the type of the result (TYPE) is at least 4 times bigger than
   HALF_TYPE, and CONST_OPRND fits an intermediate type (2 times smaller than
   TYPE), we can perform widen-mult from the intermediate type to TYPE and
   replace a_T = (TYPE) a_t; with a_it = (interm_type) a_t;  */

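/* Illustration only, a hedged sketch that is not part of this file: the
   special case above, where the constant 300 does not fit 'signed char'
   (HALF_TYPE) but fits the intermediate type 'short', so widen-mult can be
   done from short to int.  Names are hypothetical; the block is #if 0'd out
   so it cannot affect compilation.  */
#if 0
static void
widen_mult_by_const_example (const signed char *a, int *out, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = a[i] * 300;   /* a_T = (int) a_t;  prod_T = a_T * 300;  */
}
#endif
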
static bool
vect_handle_widen_mult_by_const (gimple stmt, tree const_oprnd, tree *oprnd,
   			         VEC (gimple, heap) **stmts, tree type,
			         tree *half_type, gimple def_stmt)
{
  tree new_type, new_oprnd, tmp;
  gimple new_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt));
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);

  if (int_fits_type_p (const_oprnd, *half_type))
    {
      /* CONST_OPRND is a constant of HALF_TYPE.  */
      *oprnd = gimple_assign_rhs1 (def_stmt);
      return true;
    }

  if (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 4)
      || !gimple_bb (def_stmt)
      || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      || !vinfo_for_stmt (def_stmt))
    return false;

  /* TYPE is 4 times bigger than HALF_TYPE, try widen-mult for
     a type 2 times bigger than HALF_TYPE.  */
  new_type = build_nonstandard_integer_type (TYPE_PRECISION (type) / 2,
                                             TYPE_UNSIGNED (type));
  if (!int_fits_type_p (const_oprnd, new_type))
    return false;

  /* Use NEW_TYPE for widen_mult.  */
  if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
    {
      new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
      /* Check if the already created pattern stmt is what we need.  */
      if (!is_gimple_assign (new_stmt)
          || gimple_assign_rhs_code (new_stmt) != NOP_EXPR
          || TREE_TYPE (gimple_assign_lhs (new_stmt)) != new_type)
        return false;

      *oprnd = gimple_assign_lhs (new_stmt);
    }
  else
    {
      /* Create a_T = (NEW_TYPE) a_t;  */
      *oprnd = gimple_assign_rhs1 (def_stmt);
      tmp = create_tmp_var (new_type, NULL);
      add_referenced_var (tmp);
      new_oprnd = make_ssa_name (tmp, NULL);
      new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd, *oprnd,
					       NULL_TREE);
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
      VEC_safe_push (gimple, heap, *stmts, def_stmt);
      *oprnd = new_oprnd;
    }

  *half_type = new_type;
  return true;
}


/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type a_t, b_t;
     TYPE a_T, b_T, prod_T;

     S1  a_t = ;
     S2  b_t = ;
     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   Also detect unsigned cases:

     unsigned type a_t, b_t;
     unsigned TYPE u_prod_T;
     TYPE a_T, b_T, prod_T;

     S1  a_t = ;
     S2  b_t = ;
     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;
     S6  u_prod_T = (unsigned TYPE) prod_T;

   and multiplication by constants:

     type a_t;
     TYPE a_T, prod_T;

     S1  a_t = ;
     S3  a_T = (TYPE) a_t;
     S5  prod_T = a_T * CONST;

   A special case of multiplication by constants is when 'TYPE' is 4 times
   bigger than 'type', but CONST fits an intermediate type 2 times smaller
   than 'TYPE'.  In that case we create an additional pattern stmt for S3
   to create a variable of the intermediate type, and perform widen-mult
   on the intermediate type as well:

     type a_t;
     interm_type a_it;
     TYPE a_T, prod_T,  prod_T';

     S1  a_t = ;
     S3  a_T = (TYPE) a_t;
           '--> a_it = (interm_type) a_t;
     S5  prod_T = a_T * CONST;
           '--> prod_T' = a_it w* CONST;

   Input/Output:

   * STMTS: Contains a stmt from which the pattern search begins.  In the
   example, when this function is called with S5, the pattern {S3,S4,S5,(S6)}
   is detected.  In case of unsigned widen-mult, the original stmt (S5) is
   replaced with S6 in STMTS.  In case of multiplication by a constant
   of an intermediate type (the last case above), STMTS also contains S3
   (inserted before S5).

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_MULT <a_t, b_t>
*/

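/* Illustration only, a hedged sketch that is not part of this file: C source
   of the S1-S5 shape above, a widening multiply of shorts into int results.
   Names are hypothetical; the block is #if 0'd out so it cannot affect
   compilation.  */
#if 0
static void
widen_mult_example (const short *a, const short *b, int *out, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = (int) a[i] * (int) b[i];   /* S3/S4 widen; S5 multiplies.  */
}
#endif
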
static gimple
vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
                               tree *type_in, tree *type_out)
{
  gimple last_stmt = VEC_pop (gimple, *stmts);
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype, vectype_out = NULL_TREE;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;
  bool op0_ok, op1_ok;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* Check argument 0.  */
  op0_ok = widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0, false);
  /* Check argument 1.  */
  op1_ok = widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1, false);

  /* In the case of multiplication by a constant, one of the operands may not
     match the pattern, but not both.  */
  if (!op0_ok && !op1_ok)
    return NULL;

  if (op0_ok && op1_ok)
    {
      oprnd0 = gimple_assign_rhs1 (def_stmt0);
      oprnd1 = gimple_assign_rhs1 (def_stmt1);
    }
  else if (!op0_ok)
    {
      if (TREE_CODE (oprnd0) == INTEGER_CST
	  && TREE_CODE (half_type1) == INTEGER_TYPE
          && vect_handle_widen_mult_by_const (last_stmt, oprnd0, &oprnd1,
                                              stmts, type,
				 	      &half_type1, def_stmt1))
        half_type0 = half_type1;
      else
	return NULL;
    }
  else if (!op1_ok)
    {
      if (TREE_CODE (oprnd1) == INTEGER_CST
          && TREE_CODE (half_type0) == INTEGER_TYPE
          && vect_handle_widen_mult_by_const (last_stmt, oprnd1, &oprnd0,
                                              stmts, type,
					      &half_type0, def_stmt0))
        half_type1 = half_type0;
      else
        return NULL;
    }

  /* Handle unsigned case.  Look for
     S6  u_prod_T = (unsigned TYPE) prod_T;
     Use unsigned TYPE as the type for WIDEN_MULT_EXPR.  */
  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (half_type0))
    {
      tree lhs = gimple_assign_lhs (last_stmt), use_lhs;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      int nuses = 0;
      gimple use_stmt = NULL;
      tree use_type;

      if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (half_type1))
        return NULL;

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
	  if (is_gimple_debug (USE_STMT (use_p)))
	    continue;
          use_stmt = USE_STMT (use_p);
          nuses++;
        }

      if (nuses != 1 || !is_gimple_assign (use_stmt)
          || gimple_assign_rhs_code (use_stmt) != NOP_EXPR)
        return NULL;

      use_lhs = gimple_assign_lhs (use_stmt);
      use_type = TREE_TYPE (use_lhs);
      if (!INTEGRAL_TYPE_P (use_type)
          || (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (use_type))
          || (TYPE_PRECISION (type) != TYPE_PRECISION (use_type)))
        return NULL;

      type = use_type;
      last_stmt = use_stmt;
    }

  if (!types_compatible_p (half_type0, half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  vectype_out = get_vectype_for_scalar_type (type);
  if (!vectype
      || !vectype_out
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
					  vectype_out, vectype,
					  &dummy, &dummy, &dummy_code,
					  &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = vectype_out;

  /* Pattern supported. Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
					       oprnd1);

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  VEC_safe_push (gimple, heap, *stmts, last_stmt);
  return pattern_stmt;
}


/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        x = x * x
   or
	x = sqrt (x)
*/

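/* Illustration only, a hedged sketch that is not part of this file: calls
   of the form this pattern rewrites.  Names are hypothetical; the block is
   #if 0'd out so it cannot affect compilation.  */
#if 0
#include <math.h>

static void
pow_example (double *a, double *b, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      a[i] = pow (a[i], 2.0);   /* Rewritten as a[i] * a[i].  */
      b[i] = pow (b[i], 0.5);   /* Rewritten as sqrt (b[i]).  */
    }
}
#endif
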
static gimple
vect_recog_pow_pattern (VEC (gimple, heap) **stmts, tree *type_in,
			tree *type_out)
{
  gimple last_stmt = VEC_index (gimple, *stmts, 0);
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  fn = gimple_call_fndecl (last_stmt);
  if (fn == NULL_TREE || DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL)
    return NULL;

  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
	  && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
	{
	  gimple stmt = gimple_build_call (newfn, 1, base);
	  if (vectorizable_function (stmt, *type_in, *type_in)
	      != NULL_TREE)
	    {
	      var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
	      gimple_call_set_lhs (stmt, var);
	      return stmt;
	    }
	}
    }

  return NULL;
}


/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e., we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This
   is a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins. In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern. In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
	 vectorized without preserving all the intermediate results. It
         produces only N/2 (widened) results (by summing up pairs of
	 intermediate results) rather than all N results.  Therefore, we
	 cannot allow this pattern when we want to get all the results and in
	 the correct order (as is the case when this computation is in an
	 inner-loop nested in an outer-loop that is being vectorized).  */

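/* Illustration only, a hedged sketch that is not part of this file: summing
   unsigned shorts into an unsigned int accumulator, the S1-S3 shape above.
   Names are hypothetical; the block is #if 0'd out so it cannot affect
   compilation.  */
#if 0
static unsigned int
widen_sum_example (const unsigned short *p, int n)
{
  int i;
  unsigned int sum = 0;   /* TYPE: at least double the width of *p.  */
  for (i = 0; i < n; i++)
    sum += p[i];          /* S2 widens x_t; S3 accumulates.  */
  return sum;
}
#endif
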
static gimple
vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in,
			      tree *type_out)
{
  gimple stmt, last_stmt = VEC_index (gimple, *stmts, 0);
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (!types_compatible_p (TREE_TYPE (oprnd0), type)
      || !types_compatible_p (TREE_TYPE (oprnd1), type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt, true))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected. Create a stmt to be used to replace the pattern: */
822 823 824 825
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
					       oprnd0, oprnd1);

826 827 828
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
829
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
830
    }
831 832 833

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
irar's avatar
 
irar committed
834
  gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));
835

836
  return pattern_stmt;
837 838 839
}


/* Return TRUE if the operation in STMT can be performed on a smaller type.

   Input:
   STMT - a statement to check.
   DEF - we support operations with two operands, one of which is constant.
         The other operand can be defined by a demotion operation, or by a
         previous statement in a sequence of over-promoted operations.  In the
         later case DEF is used to replace that operand.  (It is defined by a
         pattern statement we created for the previous statement in the
         sequence).

   Input/output:
   NEW_TYPE - Output: a smaller type that we are trying to use.  Input: if not
         NULL, it's the type of DEF.
   STMTS - additional pattern statements.  If a pattern statement (type
         conversion) is created in this function, its original statement is
         added to STMTS.

   Output:
   OP0, OP1 - if the operation fits a smaller type, OP0 and OP1 are the new
         operands to use in the new pattern statement for STMT (will be created
         in vect_recog_over_widening_pattern ()).
   NEW_DEF_STMT - in case DEF has to be promoted, we create two pattern
         statements for STMT: the first one is a type promotion and the second
         one is the operation itself.  We return the type promotion statement
         in NEW_DEF_STMT and further store it in STMT_VINFO_PATTERN_DEF_STMT of
         the second pattern statement.  */

static bool
vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
                                  tree *op0, tree *op1, gimple *new_def_stmt,
                                  VEC (gimple, heap) **stmts)
{
  enum tree_code code;
  tree const_oprnd, oprnd;
  tree interm_type = NULL_TREE, half_type, tmp, new_oprnd, type;
  gimple def_stmt, new_stmt;
  bool first = false;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt));
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);

  *new_def_stmt = NULL;

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != LSHIFT_EXPR && code != RSHIFT_EXPR
      && code != BIT_IOR_EXPR && code != BIT_XOR_EXPR && code != BIT_AND_EXPR)
    return false;

  oprnd = gimple_assign_rhs1 (stmt);
  const_oprnd = gimple_assign_rhs2 (stmt);
  type = gimple_expr_type (stmt);

  if (TREE_CODE (oprnd) != SSA_NAME
      || TREE_CODE (const_oprnd) != INTEGER_CST)
    return false;

  /* If we are in the middle of a sequence, we use DEF from a previous
     statement.  Otherwise, OPRND has to be a result of type promotion.  */
  if (*new_type)
    {
      half_type = *new_type;
      oprnd = def;
    }
  else
    {
      first = true;
      if (!widened_name_p (oprnd, stmt, &half_type, &def_stmt, false)
          || !gimple_bb (def_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          || !vinfo_for_stmt (def_stmt))
        return false;
    }

  /* Can we perform the operation on a smaller type?  */
  switch (code)
    {
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        if (!int_fits_type_p (const_oprnd, half_type))
          {
            /* HALF_TYPE is not enough.  Try a bigger type if possible.  */
            if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
              return false;

            interm_type = build_nonstandard_integer_type (
                        TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));
            if (!int_fits_type_p (const_oprnd, interm_type))
              return false;
          }

        break;

      case LSHIFT_EXPR:
        /* Try intermediate type - HALF_TYPE is not enough for sure.  */
        if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
          return false;

        /* Check that HALF_TYPE size + shift amount <= INTERM_TYPE size.
          (e.g., if the original value was char, the shift amount is at most 8
           if we want to use short).  */
        if (compare_tree_int (const_oprnd, TYPE_PRECISION (half_type)) == 1)
          return false;

        interm_type = build_nonstandard_integer_type (
                        TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));

        if (!vect_supportable_shift (code, interm_type))
          return false;

        break;

      case RSHIFT_EXPR:
        if (vect_supportable_shift (code, half_type))
          break;

        /* Try intermediate type - HALF_TYPE is not supported.  */
        if (TYPE_PRECISION (type) < (TYPE_PRECISION (half_type) * 4))
          return false;

        interm_type = build_nonstandard_integer_type (
                        TYPE_PRECISION (half_type) * 2, TYPE_UNSIGNED (type));

        if (!vect_supportable_shift (code, interm_type))
          return false;

        break;

      default:
        gcc_unreachable ();
    }

  /* There are four possible cases:
     1. OPRND is defined by a type promotion (in that case FIRST is TRUE, it's
        the first statement in the sequence)
        a. The original, HALF_TYPE, is not enough - we replace the promotion
           from HALF_TYPE to TYPE with a promotion to INTERM_TYPE.
        b. HALF_TYPE is sufficient, OPRND is set as the RHS of the original
           promotion.
     2. OPRND is defined by a pattern statement we created.
        a. Its type is not sufficient for the operation, we create a new stmt:
           a type conversion for OPRND from HALF_TYPE to INTERM_TYPE.  We store
           this statement in NEW_DEF_STMT, and it is later put in
           STMT_VINFO_PATTERN_DEF_STMT of the pattern statement for STMT.
        b. OPRND is good to use in the new statement.  */
  if (first)
    {
      if (interm_type)
        {
          /* Replace the original type conversion HALF_TYPE->TYPE with
             HALF_TYPE->INTERM_TYPE.  */
          if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
            {
              new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
              /* Check if the already created pattern stmt is what we need.  */
              if (!is_gimple_assign (new_stmt)
                  || gimple_assign_rhs_code (new_stmt) != NOP_EXPR
                  || TREE_TYPE (gimple_assign_lhs (new_stmt)) != interm_type)
                return false;

              oprnd = gimple_assign_lhs (new_stmt);
            }
          else
            {
              /* Create NEW_OPRND = (INTERM_TYPE) OPRND.  */
              oprnd = gimple_assign_rhs1 (def_stmt);
              tmp = create_tmp_reg (interm_type, NULL);
              add_referenced_var (tmp);
              new_oprnd = make_ssa_name (tmp, NULL);
              new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
                                                       oprnd, NULL_TREE);
              STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
              VEC_safe_push (gimple, heap, *stmts, def_stmt);
              oprnd = new_oprnd;
            }
        }
      else
        {
          /* Retrieve the operand before the type promotion.  */
          oprnd = gimple_assign_rhs1 (def_stmt);
        }
    }
  else
    {
      if (interm_type)
        {
          /* Create a type conversion HALF_TYPE->INTERM_TYPE.  */
          tmp = create_tmp_reg (interm_type, NULL);
          add_referenced_var (tmp);
          new_oprnd = make_ssa_name (tmp, NULL);
          new_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
                                                   oprnd, NULL_TREE);
          oprnd = new_oprnd;
          *new_def_stmt = new_stmt;
        }

      /* Otherwise, OPRND is already set.  */
    }

  if (interm_type)
    *new_type = interm_type;
  else
    *new_type = half_type;

  *op0 = oprnd;
  *op1 = fold_convert (*new_type, const_oprnd);

  return true;
}


/* Try to find a statement or a sequence of statements that can be performed
   on a smaller type:

     type x_t;
     TYPE x_T, res0_T, res1_T;
   loop:
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  res0_T = op (x_T, C0);
     S4  res1_T = op (res0_T, C1);
     S5  ... = () res1_T;  - type demotion

   where type 'TYPE' is at least double the size of type 'type', C0 and C1 are
   constants.
   Check if S3 and S4 can be done on a smaller type than 'TYPE'; it can either
   be 'type' or some intermediate type.  For now, we expect S5 to be a type
   demotion operation.  We also check that S3 and S4 have only one use.  */

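/* Illustration only, a hedged sketch that is not part of this file: char
   data shifted and masked in 'int' (after the usual C promotions) and then
   demoted back to char, the S1-S5 shape above, which can be done in a
   narrower type.  Names are hypothetical; the block is #if 0'd out so it
   cannot affect compilation.  */
#if 0
static void
over_widening_example (const unsigned char *in, unsigned char *out, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = (in[i] >> 2) & 0x3f;   /* S3 shift, S4 mask, S5 demotion.  */
}
#endif
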
static gimple
vect_recog_over_widening_pattern (VEC (gimple, heap) **stmts,
                                  tree *type_in, tree *type_out)
{
  gimple stmt = VEC_pop (gimple, *stmts);
  gimple pattern_stmt = NULL, new_def_stmt, prev_stmt = NULL, use_stmt = NULL;
  tree op0, op1, vectype = NULL_TREE, lhs, use_lhs, use_type;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nuses = 0;
  tree var = NULL_TREE, new_type = NULL_TREE, tmp, new_oprnd;
  bool first;
  struct loop *loop = (gimple_bb (stmt))->loop_father;

  first = true;
  while (1)
    {
      if (!vinfo_for_stmt (stmt)
          || STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (stmt)))
        return NULL;

      new_def_stmt = NULL;
      if (!vect_operation_fits_smaller_type (stmt, var, &new_type,
                                             &op0, &op1, &new_def_stmt,
                                             stmts))
        {
          if (first)
            return NULL;
          else
            break;
        }

      /* STMT can be performed on a smaller type.  Check its uses.  */
      lhs = gimple_assign_lhs (stmt);
      nuses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          if (is_gimple_debug (USE_STMT (use_p)))
            continue;
          use_stmt = USE_STMT (use_p);
          nuses++;
        }

      if (nuses != 1 || !is_gimple_assign (use_stmt)
          || !gimple_bb (use_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
        return NULL;

      /* Create pattern statement for STMT.  */
      vectype = get_vectype_for_scalar_type (new_type);
      if (!vectype)
        return NULL;

      /* We want to collect all the statements for which we create pattern
         statements, except for the case when the last statement in the
         sequence doesn't have a corresponding pattern statement.  In such
         a case we associate the last pattern statement with the last
         statement in the sequence.  Therefore, we only add an original
         statement to the list if we know that it is not the last.  */
      if (prev_stmt)
        VEC_safe_push (gimple, heap, *stmts, prev_stmt);

      var = vect_recog_temp_ssa_var (new_type, NULL);
      pattern_stmt
	= gimple_build_assign_with_ops (gimple_assign_rhs_code (stmt), var,
					op0, op1);
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)) = pattern_stmt;
      STMT_VINFO_PATTERN_DEF_STMT (vinfo_for_stmt (stmt)) = new_def_stmt;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "created pattern stmt: ");
          print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
        }

      prev_stmt = stmt;
      stmt = use_stmt;

      first = false;
    }

  /* We got a sequence.  We expect it to end with a type demotion operation.
     Otherwise, we quit (for now).  There are three possible cases: the
     conversion is to NEW_TYPE (we don't do anything), the conversion is to
     a type bigger than NEW_TYPE and/or the signedness of USE_TYPE and
     NEW_TYPE differs (we create a new conversion statement).  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_stmt)))
    {
      use_lhs = gimple_assign_lhs (use_stmt);
      use_type = TREE_TYPE (use_lhs);
      /* Support only type promotion or signedness change.  */
      if (!INTEGRAL_TYPE_P (use_type)
          || TYPE_PRECISION (new_type) > TYPE_PRECISION (use_type))
        return NULL;

      if (TYPE_UNSIGNED (new_type) != TYPE_UNSIGNED (use_type)
          || TYPE_PRECISION (new_type) != TYPE_PRECISION (use_type))
        {
          /* Create NEW_TYPE->USE_TYPE conversion.  */
          tmp = create_tmp_reg (use_type, NULL);
          add_referenced_var (tmp);
          new_oprnd = make_ssa_name (tmp, NULL);
          pattern_stmt = gimple_build_assign_with_ops (NOP_EXPR, new_oprnd,
                                                       var, NULL_TREE);
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) = pattern_stmt;

          *type_in = get_vectype_for_scalar_type (new_type);
          *type_out = get_vectype_for_scalar_type (use_type);

          /* We created a pattern statement for the last statement in the
             sequence, so we don't need to associate it with the pattern
             statement created for PREV_STMT.  Therefore, we add PREV_STMT
             to the list in order to mark it later in vect_pattern_recog_1.  */
          if (prev_stmt)
            VEC_safe_push (gimple, heap, *stmts, prev_stmt);
        }
      else
        {
          if (prev_stmt)
            STMT_VINFO_PATTERN_DEF_STMT (vinfo_for_stmt (use_stmt))
               = STMT_VINFO_PATTERN_DEF_STMT (vinfo_for_stmt (prev_stmt));

          *type_in = vectype;
          *type_out = NULL_TREE;
        }

      VEC_safe_push (gimple, heap, *stmts, use_stmt);
    }
  else
    /* TODO: support general case, create a conversion to the correct type.  */
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_over_widening_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  return pattern_stmt;
}


/* Function vect_recog_mixed_size_cond_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE a_T, b_T, c_T;
   loop:
     S1  a_T = x_t CMP y_t ? b_T : c_T;

   where type 'TYPE' is an integral type which has different size
   from 'type'.  b_T and c_T are constants and if 'TYPE' is wider
   than 'type', the constants need to fit into an integer type
   with the same width as 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the pattern.
	Additionally a def_stmt is added.

	a_it = x_t CMP y_t ? b_it : c_it;
	a_T = (TYPE) a_it;  */

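/* Illustration only, a hedged sketch that is not part of this file: a short
   comparison selecting between int constants, the S1 shape above with
   'type' = short and 'TYPE' = int.  Names are hypothetical; the block is
   #if 0'd out so it cannot affect compilation.  */
#if 0
static void
mixed_size_cond_example (const short *x, const short *y, int *out, int n)
{
  int i;
  for (i = 0; i < n; i++)
    out[i] = x[i] < y[i] ? 1 : -1;   /* Constants fit a short-width type.  */
}
#endif
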
static gimple
vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
				    tree *type_out)
{
  gimple last_stmt = VEC_index (gimple, *stmts, 0);
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt), def_stmt_info;
  tree type, vectype, comp_vectype, itype, vecitype;
  enum machine_mode cmpmode;
  gimple pattern_stmt, def_stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!is_gimple_assign (last_stmt)
      || gimple_assign_rhs_code (last_stmt) != COND_EXPR
      || STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;

  cond_expr = gimple_assign_rhs1 (last_stmt);
  then_clause = gimple_assign_rhs2 (last_stmt);
  else_clause = gimple_assign_rhs3 (last_stmt);

  if (TREE_CODE (then_clause) != INTEGER_CST
      || TREE_CODE (else_clause) != INTEGER_CST)
    return NULL;

  if (!COMPARISON_CLASS_P (cond_expr))
    return NULL;

  comp_vectype
    = get_vectype_for_scalar_type (TREE_TYPE (TREE_OPERAND (cond_expr, 0)));
  if (comp_vectype == NULL_TREE)
    return NULL;

  type = gimple_expr_type (last_stmt);
  cmpmode = GET_MODE_INNER (TYPE_MODE (comp_vectype));

  if (GET_MODE_BITSIZE (TYPE_MODE (type)) == GET_MODE_BITSIZE (cmpmode))
    return NULL;

  vectype = get_vectype_for_scalar_type (type);
  if (vectype == NULL_TREE)
    return NULL;

  if (expand_vec_cond_expr_p (vectype, comp_vectype))
    return NULL;

  itype = build_nonstandard_integer_type (GET_MODE_BITSIZE (cmpmode),
					  TYPE_UNSIGNED (type));
  if (itype == NULL_TREE
      || GET_MODE_BITSIZE (TYPE_MODE (itype)) != GET_MODE_BITSIZE (cmpmode))
    return NULL;

  vecitype = get_vectype_for_scalar_type (itype);
  if (vecitype == NULL_TREE)
    return NULL;

  if (!expand_vec_cond_expr_p (vecitype, comp_vectype))
    return NULL;

  if (GET_MODE_BITSIZE (TYPE_MODE (type)) > GET_MODE_BITSIZE (cmpmode))
    {
      if (!int_fits_type_p (then_clause, itype)
	  || !int_fits_type_p (else_clause, itype))
	return NULL;
    }

  def_stmt
    = gimple_build_assign_with_ops3 (COND_EXPR,
				     vect_recog_temp_ssa_var (itype, NULL),
				     unshare_expr (cond_expr),
				     fold_convert (itype, then_clause),
				     fold_convert (itype, else_clause));
  pattern_stmt
    = gimple_build_assign_with_ops (NOP_EXPR,
				    vect_recog_temp_ssa_var (type, NULL),
				    gimple_assign_lhs (def_stmt), NULL_TREE);

  STMT_VINFO_PATTERN_DEF_STMT (stmt_vinfo) = def_stmt;
  def_stmt_info = new_stmt_vec_info (def_stmt, loop_vinfo, NULL);
  set_vinfo_for_stmt (def_stmt, def_stmt_info);
  STMT_VINFO_VECTYPE (def_stmt_info) = vecitype;
  *type_in = vecitype;
  *type_out = vectype;

  return pattern_stmt;
}


/* Mark statements that are involved in a pattern.  */

static inline void
vect_mark_pattern_stmts (gimple orig_stmt, gimple pattern_stmt,
                         tree pattern_vectype)
{
  stmt_vec_info pattern_stmt_info, def_stmt_info;
  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (orig_stmt_info);
  gimple def_stmt;

  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  gimple_set_bb (pattern_stmt, gimple_bb (orig_stmt));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info)
	= STMT_VINFO_DEF_TYPE (orig_stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (orig_stmt_info) = true;
  STMT_VINFO_RELATED_STMT (orig_stmt_info) = pattern_stmt;
  STMT_VINFO_PATTERN_DEF_STMT (pattern_stmt_info)
	= STMT_VINFO_PATTERN_DEF_STMT (orig_stmt_info);
  if (STMT_VINFO_PATTERN_DEF_STMT (pattern_stmt_info))
    {
      def_stmt = STMT_VINFO_PATTERN_DEF_STMT (pattern_stmt_info);
      def_stmt_info = vinfo_for_stmt (def_stmt);
      if (def_stmt_info == NULL)
	{
	  def_stmt_info = new_stmt_vec_info (def_stmt, loop_vinfo, NULL);
	  set_vinfo_for_stmt (def_stmt, def_stmt_info);
	}
      gimple_set_bb (def_stmt, gimple_bb (orig_stmt));
      STMT_VINFO_RELATED_STMT (def_stmt_info) = orig_stmt;
      STMT_VINFO_DEF_TYPE (def_stmt_info)
	= STMT_VINFO_DEF_TYPE (orig_stmt_info);
      if (STMT_VINFO_VECTYPE (def_stmt_info) == NULL_TREE)
	STMT_VINFO_VECTYPE (def_stmt_info) = pattern_vectype;
    }
}

/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type. If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by PATTERN_RECOG_FUNC.
   If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
   to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_pattern_recog.  */

static void
vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
		      gimple_stmt_iterator si,
		      VEC (gimple, heap) **stmts_to_replace)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info;
  loop_vec_info loop_vinfo;
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  VEC_truncate (gimple, *stmts_to_replace, 0);
  VEC_quick_push (gimple, *stmts_to_replace, stmt);
  pattern_stmt = (* vect_recog_func) (stmts_to_replace, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  stmt = VEC_last (gimple, *stmts_to_replace);
  stmt_info = vinfo_for_stmt (stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
 
  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
	gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support.  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
	return;
      if (type_out)
	type_out = get_vectype_for_scalar_type (type_out);
      else
	type_out = type_in;
      if (!type_out)
	return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
	code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
	  gcc_assert (is_gimple_call (pattern_stmt));
	  code = CALL_EXPR;
	}

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
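      /* The pattern is usable only if the target supports CODE on VEC_MODE
         and the result mode of the instruction matches TYPE_OUT.  */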
      if (!optab
          || (icode = optab_handler (optab, vec_mode)) == CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
	return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern.  */
  vect_mark_pattern_stmts (stmt, pattern_stmt, pattern_vectype);

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation.  */
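  /* If the stmt anchoring the pattern was collected as a reduction, drop
     it from the reductions list so that it is not considered for SLP.  */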
  FOR_EACH_VEC_ELT (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i, next)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);

  /* It is possible that additional pattern stmts are created and inserted in
     STMTS_TO_REPLACE.  We create a stmt_info for each of them, and mark the
     relevant statements.  */
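  /* The last element of STMTS_TO_REPLACE is the stmt that anchors the
     pattern and was already marked above, so walk every element but the
     last.  */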
  for (i = 0; VEC_iterate (gimple, *stmts_to_replace, i, stmt)
	      && (unsigned) i < (VEC_length (gimple, *stmts_to_replace) - 1);
       i++)
    {
      stmt_info = vinfo_for_stmt (stmt);
      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "additional pattern stmt: ");
          print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
        }

      vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
    }
}
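
/* A minimal sketch of the vect_recog_* interface that vect_pattern_recog_1
   expects.  Illustrative only: the function name and the trivial body are
   hypothetical and not part of GCC.

     static gimple
     vect_recog_example_pattern (VEC (gimple, heap) **stmts, tree *type_in,
                                 tree *type_out)
     {
       gimple last_stmt = VEC_last (gimple, *stmts);
       gimple pattern_stmt = NULL;

       if (!is_gimple_assign (last_stmt))
         return NULL;                    (decline: no pattern recognized)

       ... match the idiom rooted at LAST_STMT, push any additional
       detected stmts onto *STMTS, and build PATTERN_STMT (a new stmt
       that is not inserted into the IL) ...

       *type_in = TREE_TYPE (gimple_assign_lhs (last_stmt));
       *type_out = *type_in;
       return pattern_stmt;
     }  */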


/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - the loop_vec_info of a loop in which we want to look for
        computation idioms.

   Output - for each computation idiom that is detected we create a new stmt
        that provides the same functionality and that can be vectorized.  We
        also record some information in the stmt_vec_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                     in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 equivalent to the pattern (the stmt is not
     inserted into the code)
   - fill in the STMT_VINFO fields as follows:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         true    S6              -
          '---> S6: a_new = ....        -       S4              -
         S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 from S6 (as usual).
   S4 will be skipped, and S5 will be vectorized as usual:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
         S4: a_0 = ..use(a_1)..         true    S6              VS6
          '---> S6: a_new = ....        -       S4              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
   elsewhere), and we'll end up with:

        VS6: va_new = ....
        VS5: ... = ..vuse(va_new)..

   In the case of more than one pattern statement, e.g., widen-mult with
   intermediate type:

     S1  a_t = ...;
     S2  a_T = (TYPE) a_t;
           '--> S3: a_it = (interm_type) a_t;
     S4  prod_T = a_T * CONST;
           '--> S5: prod_T' = a_it w* CONST;

   there may be other users of a_T outside the pattern.  In that case S2 will
   be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
   and vectorized.  The vector stmt VS2 will be recorded in S2, and VS3 will
   be recorded in S3.  */

void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i, j;
  vect_recog_func_ptr vect_recog_func;
  VEC (gimple, heap) *stmts_to_replace = VEC_alloc (gimple, heap, 1);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            {
	      vect_recog_func = vect_vect_recog_func_ptrs[j];
	      vect_pattern_recog_1 (vect_recog_func, si,
				    &stmts_to_replace);
            }
        }
    }

  VEC_free (gimple, heap, stmts_to_replace);
}
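
/* For illustration (hypothetical variable names, not from the sources):
   a scalar source loop exhibiting the widen-mult idiom detected by
   vect_recog_widen_mult_pattern:

     short *a, *b;
     int *c;
     for (i = 0; i < n; i++)
       c[i] = (int) a[i] * (int) b[i];

   i.e., in pseudo-GIMPLE:

     S1: a_w = (int) a_t;
     S2: b_w = (int) b_t;
     S3: prod = a_w * b_w;

   for which the recognizer creates a single widening multiplication
   pattern stmt prod = a_t w* b_t.  */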