From 4446b9af37670badb27e32d0b45de2a1e580b808 Mon Sep 17 00:00:00 2001 From: Paddle CI_MAC Date: Thu, 2 Sep 2021 14:24:41 +0800 Subject: [PATCH] mirgate_35267 --- python/paddle/fluid/layers/nn.py | 444 ++++++++++-------- .../fluid/tests/unittests/test_slice_op.py | 51 ++ 2 files changed, 309 insertions(+), 186 deletions(-) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index e5663d607a..4216384b6f 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -26,7 +26,7 @@ import six import paddle from ..layer_helper import LayerHelper from ..initializer import Normal, Constant, NumpyArrayInitializer -from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only +from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags from .. import dygraph_utils from ..param_attr import ParamAttr from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_ @@ -39,6 +39,7 @@ from ...utils import deprecated from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype import paddle from paddle.utils import deprecated +from paddle import _C_ops __all__ = [ 'fc', @@ -201,7 +202,7 @@ def _elementwise_op_in_dygraph(x, act=None, use_mkldnn=False, op_name=None): - op = getattr(core.ops, op_name) + op = getattr(_C_ops, op_name) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) return dygraph_utils._append_activation_in_dygraph( @@ -332,7 +333,8 @@ def fc(input, for i, input_x in enumerate(input): check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc') dtype = helper.input_dtype() - check_dtype(dtype, 'input', ['float16', 'float32', 'float64'], 'fc') + check_dtype(dtype, 'input', ['float16', 'uint16', 'float32', 'float64'], + 'fc') mul_results = [] for input_var, param_attr in helper.iter_inputs_and_params(): input_shape = input_var.shape @@ -1022,6 +1024,18 @@ def dropout(x, if dropout_prob == 0: return x + if in_dygraph_mode(): + if (seed is None or + seed == 0) and default_main_program().random_seed != 0: + seed = default_main_program().random_seed + if is_test is None: + is_test = not _dygraph_tracer()._train_mode + out, mask = _C_ops.dropout( + x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed', + seed is not None, 'seed', seed if seed is not None else 0, + 'dropout_implementation', dropout_implementation) + return out + def get_attrs(prog, dropout_prob, is_test, seed): if (seed is None or seed == 0) and prog.random_seed != 0: seed = prog.random_seed @@ -1034,18 +1048,6 @@ def dropout(x, } return attrs - if in_dygraph_mode(): - if (seed is None or - seed == 0) and default_main_program().random_seed != 0: - seed = default_main_program().random_seed - if is_test is None: - is_test = not _dygraph_tracer()._train_mode - out, mask = core.ops.dropout( - x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed', - seed is not None, 'seed', seed if seed is not None else 0, - 'dropout_implementation', dropout_implementation) - return out - helper = LayerHelper('dropout', **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'dropout') @@ -1332,7 +1334,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1): """ if in_dygraph_mode(): - return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn) + return _C_ops.softmax(input, 'axis', axis, 'use_cudnn', 
use_cudnn) inputs = {"X": [input]} attrs = {"axis": axis, "use_cudnn": use_cudnn} @@ -1501,6 +1503,9 @@ def conv2d(input, check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], 'conv2d') + if len(input.shape) != 4: + raise ValueError("Input size should be 4, " + "but received {}".format(len(input.shape))) num_channels = input.shape[1] if not isinstance(use_cudnn, bool): raise ValueError("Attr(use_cudnn) should be True or False. Received " @@ -1519,6 +1524,20 @@ def conv2d(input, "Received: %s." % (str(input.shape), str(num_channels))) assert param_attr is not False, "param_attr should not be False here." + if groups is None: + num_filter_channels = num_channels + elif groups <= 0: + raise ValueError("the groups of input must be greater than 0, " + "but received the groups of input is {}".format( + groups)) + else: + if num_channels % groups != 0: + raise ValueError( + "the channel of input must be divisible by groups," + "received: the channel of input is {}, the shape of input is {}" + ", the groups is {}".format(num_channels, input.shape, groups)) + num_filter_channels = num_channels // groups + l_type = 'conv2d' if (num_channels == groups and num_filters % num_channels == 0 and not use_cudnn): @@ -1531,16 +1550,6 @@ def conv2d(input, helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() - if groups is None: - num_filter_channels = num_channels - else: - if num_channels % groups != 0: - raise ValueError( - "the channel of input must be divisible by groups," - "received: the channel of input is {}, the shape of input is {}" - ", the groups is {}".format(num_channels, input.shape, groups)) - num_filter_channels = num_channels // groups - filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') stride = utils.convert_to_list(stride, 2, 'stride') dilation = utils.convert_to_list(dilation, 2, 'dilation') @@ -1596,6 +1605,11 @@ def conv2d(input, def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * num_channels + if filter_elem_num <= 0: + raise ValueError( + "Invalid filter number, excepted number is larger than 0, but" + " received {}, please check the input shape and " + "filter size.".format(filter_elem_num)) std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) @@ -1877,6 +1891,12 @@ def conv3d(input, def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * filter_size[ 2] * num_channels + if filter_elem_num <= 0: + raise ValueError( + "Invalid filter number, excepted number is larger than 0, but" + " received {}, please check the input shape and " + "filter size.".format(filter_elem_num)) + std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) @@ -3700,6 +3720,10 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None): # create intput and parameters inputs = {'Weight': weight} input_shape = weight.shape + assert weight.numel() > 0, "Any dimension of input cannot be equal to 0." + assert dim < len(input_shape), ("The input `dim` should be less than the " + "rank of `weight`, but received dim=" + "{}".format(dim)) h = input_shape[dim] w = np.prod(input_shape) // h @@ -3904,6 +3928,10 @@ def conv2d_transpose(input, print(conv2d_transpose.shape) # [-1, 2, 34, 34] """ assert param_attr is not False, "param_attr should not be False in conv2d_transpose." 
+ if len(input.shape) != 4: + raise ValueError("Input size should be 4, " + "but received {}".format(len(input.shape))) + if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received " @@ -3995,7 +4023,14 @@ def conv2d_transpose(input, output_size = utils.convert_to_list(output_size, 2, 'output_size') else: raise ValueError("output_size should be int, list[int] or tuple[int]") - groups = 1 if groups is None else groups + + if groups is None: + groups = 1 + elif groups <= 0: + raise ValueError("the groups of input must be greater than 0, " + "but received the groups of input is {}".format( + groups)) + filter_shape = [input_channel, num_filters // groups] + filter_size img_filter = helper.create_parameter( @@ -4396,8 +4431,8 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): reduce_all = True if dim == None or dim == [] or len(dim) == len( input.shape) else False dim = dim if dim != None and dim != [] else [0] - return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim, - 'reduce_all', reduce_all) + return _C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim, + 'reduce_all', reduce_all) attrs = { 'dim': dim if dim != None and dim != [] else [0], 'keep_dim': keep_dim, @@ -4405,7 +4440,8 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): if dim == None or dim == [] or len(dim) == len(input.shape) else False } check_variable_and_dtype( - input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum') + input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], + 'reduce_sum') helper = LayerHelper('reduce_sum', **locals()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) helper.append_op( @@ -4858,6 +4894,7 @@ def split(input, num_or_sections, dim=-1, name=None): if isinstance(dim, Variable): dim = dim.numpy() dim = dim.item(0) + assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0" dim = (len(input.shape) + dim) if dim < 0 else dim attrs += ('axis', dim) @@ -4878,7 +4915,7 @@ def split(input, num_or_sections, dim=-1, name=None): raise TypeError( "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but " "received %s." % (type(num_or_sections))) - return core.ops.split(input, num, *attrs) + return _C_ops.split(input, num, *attrs) check_variable_and_dtype( input, 'input', @@ -4919,6 +4956,7 @@ def split(input, num_or_sections, dim=-1, name=None): dim.stop_gradient = True inputs['AxisTensor'] = dim else: + assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0" dim = (len(input_shape) + dim) if dim < 0 else dim attrs['axis'] = dim @@ -5111,16 +5149,10 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32') out = fluid.layers.matmul(x, y, True, True) """ - attrs = { - 'transpose_X': transpose_x, - 'transpose_Y': transpose_y, - 'alpha': float(alpha), - } - if in_dygraph_mode(): out = _varbase_creator(dtype=x.dtype) - core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y', - transpose_y, 'alpha', float(alpha)) + _C_ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y', + transpose_y, 'alpha', float(alpha)) return out def __check_input(x, y): @@ -5159,6 +5191,12 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): "But received x_shape[%d] != y_shape[%d]. 
X's shape: %s, " "Y's shape: %s.\n" % (i, i, x_shape, y_shape)) + attrs = { + 'transpose_X': transpose_x, + 'transpose_Y': transpose_y, + 'alpha': float(alpha), + } + __check_input(x, y) helper = LayerHelper('matmul', **locals()) @@ -5245,7 +5283,7 @@ def topk(input, k, name=None): """ if in_dygraph_mode(): _k = k.numpy().item(0) if isinstance(k, Variable) else k - out, indices = core.ops.top_k(input, 'k', _k) + out, indices = _C_ops.top_k(input, 'k', _k) out.stop_gradient = True indices.stop_gradient = True return out, indices @@ -5488,7 +5526,7 @@ def transpose(x, perm, name=None): """ if in_dygraph_mode(): - out, _ = core.ops.transpose2(x, 'axis', perm) + out, _ = _C_ops.transpose2(x, 'axis', perm) return out check_variable_and_dtype( @@ -5770,7 +5808,7 @@ def multiplex(inputs, index, name=None): """ if in_dygraph_mode(): - return core.ops.multiplex(index, inputs) + return _C_ops.multiplex(index, inputs) helper = LayerHelper('multiplex', **locals()) check_type(inputs, 'inputs', (list), 'multiplex') @@ -5956,8 +5994,8 @@ def one_hot(input, depth, allow_out_of_range=False): assert depth.shape == ( 1, ), "depth of type Variable should have shape [1]" depth = depth.item(0) - out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range', - allow_out_of_range) + out = _C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range', + allow_out_of_range) out.stop_gradient = True return out @@ -6138,10 +6176,14 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): item.numpy().item(0) if isinstance(item, Variable) else item for item in shape ] - out, _ = core.ops.reshape2(x, None, 'shape', shape) + out, _ = _C_ops.reshape2(x, None, 'shape', shape) elif isinstance(shape, Variable): shape.stop_gradient = True - out, _ = core.ops.reshape2(x, shape) + out, _ = _C_ops.reshape2(x, shape) + else: + raise ValueError( + "shape must be an instance of `list`, `tuple` or `Variable`," + " got '{}.'".format(type(shape))) return dygraph_utils._append_activation_in_dygraph(out, act) @@ -6262,7 +6304,7 @@ def squeeze(input, axes, name=None): """ if in_dygraph_mode(): - out, _ = core.ops.squeeze2(input, 'axes', axes) + out, _ = _C_ops.squeeze2(input, 'axes', axes) return out helper = LayerHelper("squeeze", **locals()) @@ -6322,7 +6364,7 @@ def unsqueeze(input, axes, name=None): item.numpy().item(0) if isinstance(item, Variable) else item for item in axes ] - out, _ = core.ops.unsqueeze2(input, 'axes', axes) + out, _ = _C_ops.unsqueeze2(input, 'axes', axes) return out check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') @@ -6845,8 +6887,7 @@ def label_smooth(label, raise ValueError("The value of epsilon must be between 0 and 1.") if in_dygraph_mode(): - return core.ops.label_smooth(label, prior_dist, 'epsilon', - float(epsilon)) + return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon)) check_variable_and_dtype(label, 'label', ['float32', 'float64'], 'label_smooth') @@ -6937,7 +6978,7 @@ def roi_pool(input, """ if in_dygraph_mode(): assert rois_num is not None, "rois_num should not be None in dygraph mode." - pool_out, argmaxes = core.ops.roi_pool( + pool_out, argmaxes = _C_ops.roi_pool( input, rois, rois_num, "pooled_height", pooled_height, "pooled_width", pooled_width, "spatial_scale", spatial_scale) return pool_out, argmaxes @@ -7025,7 +7066,7 @@ def roi_align(input, """ if in_dygraph_mode(): assert rois_num is not None, "rois_num should not be None in dygraph mode." 
- align_out = core.ops.roi_align( + align_out = _C_ops.roi_align( input, rois, rois_num, "pooled_height", pooled_height, "pooled_width", pooled_width, "spatial_scale", spatial_scale, "sampling_ratio", sampling_ratio) @@ -7066,17 +7107,17 @@ def dice_loss(input, label, epsilon=0.00001, name=None): .. math:: - dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\ - &= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\ - &= \\frac{(union\_area - intersection\_area)}{total\_area} + dice\_loss &= 1 - \frac{2 * intersection\_area}{total\_area} \\ + &= \frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\ + &= \frac{(union\_area - intersection\_area)}{total\_area} Parameters: - input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_D]`, where :math:`N_1` is - the batch_size, :math:`N_D` is 1. It is usually the output predictions of sigmoid activation. - The data type can be float32 or float64. - label (Tensor): Tensor, the groud truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_D]`. - where :math:`N_1` is the batch_size, :math:`N_D` is 1. The data type can be float32 or float64. + input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_k, D]`, where :math:`N_1` is + the batch_size, :math:`D` is the number of categories. It is usually the output + predictions of sigmoid activation. The data type can be float32 or float64. + label (Tensor): Tensor, the groud truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_k, 1]`. + where :math:`N_1` is the batch_size. The data type can be int32 or int64. epsilon (float): The epsilon will be added to the numerator and denominator. If both input and label are empty, it makes sure dice is 1. Default: 0.00001 @@ -7098,6 +7139,21 @@ def dice_loss(input, label, epsilon=0.00001, name=None): predictions = F.softmax(x) loss = F.dice_loss(input=predictions, label=label) """ + assert input.dtype in (paddle.float32, paddle.float64) + assert label.dtype in (paddle.int32, paddle.int64) + assert len(input.shape) >= 2, \ + "The rank of input should be greater than or equal to 2." + assert len(input.shape) == len(label.shape), ( + "The rank of input and label should be equal, " + "but received input: %d, label: %d." % + (len(input.shape), len(label.shape))) + assert label.shape[-1] == 1, ("The last dimension of label should be 1, " + "but received %d." % label.shape[-1]) + assert input.shape[:-1] == label.shape[:-1], ( + "All dimensions should be equal except the last one.") + assert input.numel() > 0 and label.numel() > 0, \ + "Any dimension of input and label cannot be equal to 0." 
+ label = one_hot(label, depth=input.shape[-1]) reduce_dim = list(range(1, len(input.shape))) inse = reduce_sum(input * label, dim=reduce_dim) @@ -8294,7 +8350,7 @@ def gather(input, index, overwrite=True): output = fluid.layers.gather(x, index) """ if in_dygraph_mode(): - return core.ops.gather(input, index, None, 'overwrite', overwrite) + return _C_ops.gather(input, index, None, 'overwrite', overwrite) check_variable_and_dtype( input, 'x', @@ -8385,7 +8441,7 @@ def gather_nd(input, index, name=None): """ if in_dygraph_mode(): - return core.ops.gather_nd(input, index) + return _C_ops.gather_nd(input, index) check_variable_and_dtype(input, 'input', ['bool', 'float32', 'float64', 'int32', 'int64'], 'gather_np') @@ -8558,7 +8614,7 @@ def scatter_nd_add(ref, index, updates, name=None): """ if in_dygraph_mode(): - op = getattr(core.ops, 'scatter_nd_add') + op = getattr(_C_ops, 'scatter_nd_add') return op(ref, index, updates) if ref.dtype != updates.dtype: @@ -8704,7 +8760,7 @@ def log(x, name=None): # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]] """ if in_dygraph_mode(): - return core.ops.log(x) + return _C_ops.log(x) check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log") inputs = {'X': [x]} @@ -8744,7 +8800,7 @@ def relu(x, name=None): # [1. 2.6]] """ if in_dygraph_mode(): - return core.ops.relu(x) + return _C_ops.relu(x) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu') @@ -8870,7 +8926,7 @@ def mean_iou(input, label, num_classes): mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes) """ if in_dygraph_mode(): - return core.ops.mean_iou(input, label, 'num_classes', num_classes) + return _C_ops.mean_iou(input, label, 'num_classes', num_classes) helper = LayerHelper('mean_iou', **locals()) check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'], @@ -9038,16 +9094,16 @@ def crop_tensor(x, shape=None, offsets=None, name=None): [6, 7, 8]]] Parameters: - x (Variable): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64. - shape (list|tuple|Variable): The output shape is specified + x (Tensor): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64. + shape (list|tuple|Tensor): The output shape is specified by `shape`. Its data type is int32. If a list/tuple, it's length must be - the same as the dimension size of `x`. If a Variable, it should be a 1-D Tensor. + the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1]. If Variable contained, it is suitable for the case that the shape may be changed each iteration. offsets (list|tuple|Variable, optional): Specifies the cropping offsets at each dimension. Its data type is int32. If a list/tuple, it's length - must be the same as the dimension size of `x`. If a Variable, it should be a 1-D + must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1]. If Variable contained, it is suitable for the case that the offsets may be changed each iteration. Default: None, the offsets are 0 at each dimension. @@ -9055,51 +9111,36 @@ def crop_tensor(x, shape=None, offsets=None, name=None): this property. For more information, please refer to :ref:`api_guide_Name` . Returns: - Variable: The cropped Tensor has same data type with `x`. - - Raises: - TypeError: If the data type of `x` is not in: float32, float64, int32, int64. 
- TypeError: If `shape` is not a list, tuple or Variable. - TypeError: If the data type of `shape` is not int32. - TypeError: If `offsets` is not None and not a list, tuple or Variable. - TypeError: If the data type of `offsets` is not int32. - ValueError: If the element in `offsets` is less than zero. + Tensor: The cropped Tensor has same data type with `x`. Examples: .. code-block:: python + :name: code-example1 - import paddle.fluid as fluid - import paddle.fluid as fluid import paddle - paddle.enable_static() - x = fluid.data(name="x", shape=[None, 3, 5], dtype="float32") - # x.shape = [-1, 3, 5], where -1 indicates batch size, and it will get the exact value in runtime. - - # shape is a 1-D Tensor - crop_shape = fluid.data(name="crop_shape", shape=[3], dtype="int32") - crop0 = fluid.layers.crop_tensor(x, shape=crop_shape) - # crop0.shape = [-1, -1, -1], it means crop0.shape[0] = x.shape[0] in runtime. - - # or shape is a list in which each element is a constant - crop1 = fluid.layers.crop_tensor(x, shape=[-1, -1, 3], offsets=[0, 1, 0]) - # crop1.shape = [-1, 2, 3] - - # or shape is a list in which each element is a constant or Variable - y = fluid.data(name="y", shape=[3, 8, 8], dtype="float32") - dim1 = fluid.data(name="dim1", shape=[1], dtype="int32") - crop2 = fluid.layers.crop_tensor(y, shape=[3, dim1, 4]) - # crop2.shape = [3, -1, 4] - - # offsets is a 1-D Tensor - crop_offsets = fluid.data(name="crop_offsets", shape=[3], dtype="int32") - crop3 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=crop_offsets) - # crop3.shape = [-1, 2, 3] - - # offsets is a list in which each element is a constant or Variable - offsets_var = fluid.data(name="dim1", shape=[1], dtype="int32") - crop4 = fluid.layers.crop_tensor(x, shape=[-1, 2, 3], offsets=[0, 1, offsets_var]) - # crop4.shape = [-1, 2, 3] + x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + # x.shape = [3, 3] + # x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + + # shape can be a 1-D Tensor or list or tuple. + shape = paddle.to_tensor([2, 2], dtype='int32') + # shape = [2, 2] + # shape = (2, 2) + out = paddle.crop(x, shape) + # out.shape = [2, 2] + # out = [[1,2], [4,5]] + + # offsets can be a 1-D Tensor or list or tuple. + offsets = paddle.to_tensor([0, 1], dtype='int32') + # offsets = [1, 0] + # offsets = (1, 1) + out = paddle.crop(x, shape, offsets) + # out.shape = [2, 2] + # if offsets = [0, 0], out = [[1,2], [4,5]] + # if offsets = [0, 1], out = [[2,3], [5,6]] + # if offsets = [1, 0], out = [[4,5], [7,8]] + # if offsets = [1, 1], out = [[5,6], [8,9]] """ helper = LayerHelper('crop_tensor', **locals()) @@ -9367,15 +9408,15 @@ def pad2d(input, # [5. 4. 5. 6. 5.] # [2. 1. 2. 3. 
2.]]]] """ - check_variable_and_dtype( - input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], - "pad2d") - if in_dygraph_mode(): _paddings = paddings.numpy().tolist() if isinstance( paddings, Variable) else paddings - return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value, - 'data_format', data_format, 'paddings', _paddings) + return _C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value, + 'data_format', data_format, 'paddings', _paddings) + + check_variable_and_dtype( + input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], + "pad2d") attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format} inputs = {'X': [input]} @@ -9481,7 +9522,7 @@ def relu6(x, threshold=6.0, name=None): outputs={'Out': out}, attrs={ 'threshold': threshold, - 'use_mkldnn': core.globals()["FLAGS_use_mkldnn"] + 'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"] }) return out @@ -9567,7 +9608,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): """ if in_dygraph_mode(): - return core.ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b) + return _C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh') @@ -9609,7 +9650,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None): result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]] """ if in_dygraph_mode(): - return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset) + return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'hard_sigmoid') @@ -9819,7 +9860,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None): #[ 1. 10.]] """ if in_dygraph_mode(): - return core.ops.brelu(x, 't_min', t_min, 't_max', t_max) + return _C_ops.brelu(x, 't_min', t_min, 't_max', t_max) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu') @@ -10078,7 +10119,7 @@ def stack(x, axis=0, name=None): axis = 0 if axis is None else axis if in_dygraph_mode(): - return core.ops.stack(x, 'axis', axis) + return _C_ops.stack(x, 'axis', axis) if not isinstance(x, list) and not isinstance(x, tuple): # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc. 
@@ -10231,7 +10272,7 @@ def unstack(x, axis=0, num=None): if in_dygraph_mode(): if num == None: num = x.shape[axis] - return core.ops.unstack(x, num, 'axis', int(axis), 'num', num) + return _C_ops.unstack(x, num, 'axis', int(axis), 'num', num) helper = LayerHelper('unstack', **locals()) if num is None: @@ -10327,7 +10368,7 @@ def expand(x, expand_times, name=None): expand_times_tensor = expand_times expand_times_tensor.stop_gradient = True - return core.ops.expand(x, expand_times_tensor, *attrs) + return _C_ops.expand(x, expand_times_tensor, *attrs) inputs = {"X": [x]} attrs = {} @@ -10435,7 +10476,7 @@ def expand_as(x, target_tensor, name=None): """ if in_dygraph_mode(): - return core.ops.expand_as(x, target_tensor) + return _C_ops.expand_as(x, target_tensor) check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as') @@ -10523,10 +10564,10 @@ def uniform_random_batch_size_like(input, """ - check_variable_and_dtype(input, 'Input', ("float32", 'float64'), + check_variable_and_dtype(input, 'Input', ("float32", 'float64', "uint16"), 'uniform_random_batch_size_like') check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like') - check_dtype(dtype, 'dtype', ('float32', 'float64'), + check_dtype(dtype, 'dtype', ('float32', 'float64', "uint16"), 'uniform_random_batch_size_like') helper = LayerHelper('uniform_random_batch_size_like', **locals()) @@ -10651,10 +10692,9 @@ def gaussian_random(shape, if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) - return core.ops.gaussian_random('shape', shape, 'mean', - float(mean), 'std', - float(std), 'seed', seed, 'dtype', - dtype) + return _C_ops.gaussian_random('shape', shape, 'mean', + float(mean), 'std', + float(std), 'seed', seed, 'dtype', dtype) check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn') check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn') @@ -10935,6 +10975,23 @@ def slice(input, axes, starts, ends): attrs = () starts_tensor = None ends_tensor = None + + if isinstance(axes, (list, tuple)): + axes = list(axes) + if len(axes) == 0: + raise ValueError( + "Input axes should not be an empty list/tuple.") + for i in range(len(axes)): + if axes[i] < 0: + axes[i] = max(0, axes[i] + len(input.shape)) + else: + axes[i] = min(len(input.shape) - 1, axes[i]) + + else: + raise ValueError( + "Input axes must be a python list or tuple, but reveived {}". + format(type(axes))) + infer_flags = list(1 for i in range(len(axes))) if isinstance(starts, (list, tuple)): @@ -10959,8 +11016,8 @@ def slice(input, axes, starts, ends): ends_tensor.stop_gradient = True infer_flags = list(-1 for i in range(len(axes))) - return core.ops.slice(input, starts_tensor, ends_tensor, 'axes', axes, - 'infer_flags', infer_flags, *attrs) + return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes, + 'infer_flags', infer_flags, *attrs) if not isinstance(starts, (list, tuple, Variable)): raise ValueError( @@ -11074,7 +11131,7 @@ def strided_slice(input, axes, starts, ends, strides): Then: result = [ [2], ] Args: - input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``float32``, ``float64``, ``int32`` or ``int64``. + input (Variable): An N-D ``Tensor`` or ``LoDTensor`` . The data type is ``bool``, ``float32``, ``float64``, ``int32`` or ``int64``. axes (list|tuple): The data type is ``int32`` . Axes that `starts` and `ends` apply to. It's optional. If it is not provides, it will be treated as :math:`[0,1,...,len(starts)-1]`. 
starts (list|tuple|Variable): The data type is ``int32`` . If ``starts`` is a list or tuple, the elements of @@ -11125,7 +11182,7 @@ def strided_slice(input, axes, starts, ends, strides): helper = LayerHelper('strided_slice', **locals()) check_variable_and_dtype(input, 'input', - ['float32', 'float64', 'int32', 'int64'], + ['bool', 'float32', 'float64', 'int32', 'int64'], 'strided_slice') check_type(axes, 'axes', (list, tuple), 'strided_slice') check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice') @@ -11350,7 +11407,7 @@ def size(input): """ if in_dygraph_mode(): - return core.ops.size(input) + return _C_ops.size(input) check_variable_and_dtype( input, 'input', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size") @@ -11439,9 +11496,9 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): if in_dygraph_mode(): _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale - out = core.ops.scale(x, 'scale', - float(_scale), 'bias', - float(bias), 'bias_after_scale', bias_after_scale) + out = _C_ops.scale(x, 'scale', + float(_scale), 'bias', + float(bias), 'bias_after_scale', bias_after_scale) return dygraph_utils._append_activation_in_dygraph(out) check_variable_and_dtype(x, "x", [ @@ -11550,7 +11607,7 @@ Examples: axis=axis, act=act, op_name='elementwise_add', - use_mkldnn=core.globals()["FLAGS_use_mkldnn"]) + use_mkldnn=_global_flags()["FLAGS_use_mkldnn"]) return _elementwise_op(LayerHelper('elementwise_add', **locals())) @@ -12127,22 +12184,27 @@ Examples: def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): if in_dygraph_mode(): - op = getattr(core.ops, op_name) + op = getattr(_C_ops, op_name) if binary_op: return op(x, y) else: return op(x) - - check_variable_and_dtype(x, "x", ["bool"], op_name) + check_variable_and_dtype(x, "x", [ + "bool", "int8", "int16", "int32", "int64", "float32", "float64" + ], op_name) if y is not None: - check_variable_and_dtype(y, "y", ["bool"], op_name) + check_variable_and_dtype(y, "y", [ + "bool", "int8", "int16", "int32", "int64", "float32", "float64" + ], op_name) if out is not None: check_type(out, "out", Variable, op_name) helper = LayerHelper(op_name, **locals()) - if binary_op: - assert x.dtype == y.dtype + if binary_op and x.dtype != y.dtype: + raise ValueError( + "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." + % (op_name, x.dtype, y.dtype)) if out is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) @@ -12160,7 +12222,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): def logical_and(x, y, out=None, name=None): r""" - ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``. + ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``. Each element of ``out`` is calculated by .. math:: @@ -12171,8 +12233,8 @@ def logical_and(x, y, out=None, name=None): ``paddle.logical_and`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`. Args: - x (Tensor): the input tensor, it's data type should be bool. - y (Tensor): the input tensor, it's data type should be bool. + x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, in32, in64, float32, float64. 
+ y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float32, float64. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. @@ -12196,7 +12258,7 @@ def logical_or(x, y, out=None, name=None): """ - ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``. + ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``. Each element of ``out`` is calculated by .. math:: @@ -12207,8 +12269,8 @@ def logical_or(x, y, out=None, name=None): ``paddle.logical_or`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`. Args: - x (Tensor): the input tensor, it's data type should be bool. - y (Tensor): the input tensor, it's data type should be bool. + x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float32, float64. + y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float32, float64. out(Tensor): The ``Variable`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. @@ -12235,7 +12297,7 @@ def logical_or(x, y, out=None, name=None): def logical_xor(x, y, out=None, name=None): r""" - ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``x``, ``y`` and ``out`` are N-dim boolean ``Tensor``. + ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``. Each element of ``out`` is calculated by .. math:: @@ -12246,8 +12308,8 @@ def logical_xor(x, y, out=None, name=None): ``paddle.logical_xor`` supports broadcasting. If you want know more about broadcasting, please refer to :ref:`user_guide_broadcasting`. Args: - x (Tensor): the input tensor, it's data type should be bool. - y (Tensor): the input tensor, it's data type should be bool. + x (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float32, float64. + y (Tensor): the input tensor, it's data type should be one of bool, int8, int16, int32, int64, float32, float64. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. @@ -12275,7 +12337,7 @@ def logical_xor(x, y, out=None, name=None): def logical_not(x, out=None, name=None): """ - ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``x`` and ``out`` are N-dim boolean ``Variable``.
+ ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Variable``. Each element of ``out`` is calculated by .. math:: @@ -12283,7 +12345,7 @@ def logical_not(x, out=None, name=None): out = !x Args: - x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool. + x(Tensor): Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, in32, in64, float32, or float64. out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor` will be created to save the output. name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`. @@ -12384,7 +12446,7 @@ def clip_by_norm(x, max_norm, name=None): """ if in_dygraph_mode(): - return core.ops.clip_by_norm(x, 'max_norm', max_norm) + return _C_ops.clip_by_norm(x, 'max_norm', max_norm) helper = LayerHelper("clip_by_norm", **locals()) check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm') @@ -12429,7 +12491,7 @@ def mean(x, name=None): """ if in_dygraph_mode(): - return core.ops.mean(x) + return _C_ops.mean(x) helper = LayerHelper("mean", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean') @@ -12510,8 +12572,8 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None): """ if in_dygraph_mode(): - return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims, - 'y_num_col_dims', y_num_col_dims) + return _C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims, + 'y_num_col_dims', y_num_col_dims) inputs = {"X": [x], "Y": [y]} attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims} @@ -13028,7 +13090,10 @@ def grid_sampler(x, grid, name=None): out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x, 'Grid': grid} - helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out}) + attrs = {'use_cudnn': False} if core.is_compiled_with_rocm() else {} + + helper.append_op( + type='grid_sampler', inputs=ipts, outputs={'Output': out}, attrs=attrs) return out @@ -13042,8 +13107,8 @@ def log_loss(input, label, epsilon=1e-4, name=None): .. math:: - Out = -label * \\log{(input + \\epsilon)} - - (1 - label) * \\log{(1 - input + \\epsilon)} + Out = -label * \log{(input + \epsilon)} + - (1 - label) * \log{(1 - input + \epsilon)} Args: input (Tensor|list): A 2-D tensor with shape [N x 1], where N is the @@ -13133,8 +13198,7 @@ def add_position_encoding(input, alpha, beta, name=None): """ if in_dygraph_mode(): - return core.ops.add_position_encoding(input, "alpha", alpha, "beta", - beta) + return _C_ops.add_position_encoding(input, "alpha", alpha, "beta", beta) helper = LayerHelper('add_position_encoding', **locals()) check_variable_and_dtype(input, 'input', ['float32', 'float64'], @@ -13303,7 +13367,7 @@ def shuffle_channel(x, group, name=None): [[0.7, 0.8], [0.8, 0.9]]]] Given group: 2 - then we get a 4-D tensor out whth the same shape of input: + then we get a 4-D tensor out with the same shape of input: out.shape = (1, 4, 2, 2) out.data = [[[[0.1, 0.2], [0.2, 0.3]], @@ -13331,7 +13395,9 @@ def shuffle_channel(x, group, name=None): Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle + import paddle.fluid as fluid + paddle.enable_static() input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32') out = fluid.layers.shuffle_channel(x=input, group=2) """ @@ -13388,8 +13454,8 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. " "Received Attr(data_format): {}.".format(data_format)) if in_dygraph_mode(): - return core.ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio', - shift_ratio, 'data_format', data_format) + return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio', + shift_ratio, 'data_format', data_format) helper = LayerHelper("temporal_shift", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift') @@ -14083,10 +14149,10 @@ def where(condition): out = layers.where(condition) # [[]] """ - helper = LayerHelper("where_index", **locals()) - if in_dygraph_mode(): - return core.ops.where_index(condition) + return _C_ops.where_index(condition) + + helper = LayerHelper("where_index", **locals()) out = helper.create_variable_for_type_inference( dtype=core.VarDesc.VarType.INT64) @@ -14408,6 +14474,11 @@ def deformable_conv(input, def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * num_channels + if filter_elem_num <= 0: + raise ValueError( + "Invalid filter number, excepted number is larger than 0, but" + " received {}, please check the input shape and " + "filter size.".format(filter_elem_num)) std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) @@ -14473,17 +14544,17 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None): .. math:: - dkernel[0] &= dilations[0] \\times (kernel\_sizes[0] - 1) + 1 + dkernel[0] &= dilations[0] \times (kernel\_sizes[0] - 1) + 1 - dkernel[1] &= dilations[1] \\times (kernel\_sizes[1] - 1) + 1 + dkernel[1] &= dilations[1] \times (kernel\_sizes[1] - 1) + 1 - hout &= \\frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1 + hout &= \frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1 - wout &= \\frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1 + wout &= \frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1 - Cout &= C \\times kernel\_sizes[0] \\times kernel\_sizes[1] + Cout &= C \times kernel\_sizes[0] \times kernel\_sizes[1] - Lout &= hout \\times wout + Lout &= hout \times wout Parameters: @@ -14768,7 +14839,7 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): the size of the last shard will be less than the calculated `shard_size` Args: - input (Tensor): Input indices with data type int64. It's last dimension must be 1. + input (Tensor): Input indices with data type int64 or int32. It's last dimension must be 1. index_num (int): An integer defining the range of the index. nshards (int): The number of shards. shard_id (int): The index of the current shard. 
@@ -14789,7 +14860,7 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): print(shard_label) # [[-1], [1]] """ - check_variable_and_dtype(input, 'input', ['int64'], 'shard_index') + check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index') op_type = 'shard_index' helper = LayerHelper(op_type, **locals()) if shard_id < 0 or shard_id >= nshards: @@ -14862,8 +14933,8 @@ def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None): print(out) # [[0.66666667, 1.66666667,3., 4.]] """ if in_dygraph_mode(): - return core.ops.hard_swish(x, 'threshold', threshold, 'scale', scale, - 'offset', offset) + return _C_ops.hard_swish(x, 'threshold', threshold, 'scale', scale, + 'offset', offset) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'hard_swish') @@ -15017,7 +15088,7 @@ def gather_tree(ids, parents): """ if in_dygraph_mode(): - return core.ops.gather_tree(ids, parents) + return _C_ops.gather_tree(ids, parents) else: helper = LayerHelper('gather_tree', **locals()) check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree') @@ -15115,12 +15186,13 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0, if in_dygraph_mode(): shape = utils.convert_shape_to_list(shape) - return core.ops.uniform_random('shape', shape, 'min', - float(min), 'max', - float(max), 'seed', seed, 'dtype', dtype) + return _C_ops.uniform_random('shape', shape, 'min', + float(min), 'max', + float(max), 'seed', seed, 'dtype', dtype) check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand') - check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform_random/rand') + check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'), + 'uniform_random/rand') inputs = dict() attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype} diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py index bd784b65c1..a80dc87525 100644 --- a/python/paddle/fluid/tests/unittests/test_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -22,6 +22,8 @@ import paddle.fluid as fluid import paddle.fluid.layers as layers import paddle +paddle.enable_static() + # Situation 1: starts(list, no tensor), ends(list, no tensor) # 1.1 without attr(decrease) @@ -683,6 +685,55 @@ class TestImperativeVarBaseGetItem(unittest.TestCase): self.assertRaises(Exception, test_float_in_index) +class TestInferShape(unittest.TestCase): + def test(self): + x = paddle.ones(shape=[3, 4, 5]) + x.desc.set_shape([3, -1, 5]) + self.assertEqual(x.shape, (3, -1, 5)) + + out0 = paddle.slice(x, axes=[1], starts=[0], ends=[3]) + self.assertEqual(out0.shape, (3, 3, 5)) + + def test_axis_less_than_zero(self): + + # Using paddle.disable_static will make other unittests fail. 
+ with fluid.dygraph.guard(): + x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) + x = paddle.to_tensor(x_arr) + + pp_slice = paddle.slice(x, [100, ], [0], [1]) + np_slice = x_arr[:, :, 0:1] + self.assertTrue(np.array_equal(pp_slice, np_slice)) + + pp_slice = paddle.slice(x, (-100, ), [0], [1]) + np_slice = x_arr[0:1] + self.assertTrue(np.array_equal(pp_slice, np_slice)) + + x_arr = np.array([], dtype=np.float32) + x = paddle.to_tensor(np.reshape(x_arr, (0, 0, 0))) + + starts = paddle.to_tensor( + np.reshape( + np.array( + [], dtype=np.int32), (0, ))) + ends = paddle.to_tensor( + np.reshape( + np.array( + [], dtype=np.int32), (0, ))) + + with self.assertRaises(ValueError): + paddle.slice(x, [-1000000], starts, ends) + + with self.assertRaises(ValueError): + paddle.slice(x, [1000000], starts, ends) + + with self.assertRaises(ValueError): + paddle.slice(x, [], starts, ends) + + with self.assertRaises(ValueError): + paddle.slice(x, 0, starts, ends) + + @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestImperativeCUDAPinnedInput(unittest.TestCase): -- Gitee
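The new TestInferShape.test_axis_less_than_zero cases above pin down the axes handling that this patch adds to the dygraph branch of slice: an axis beyond rank-1 is clamped to the last axis, an axis below -rank is clamped to 0, and an empty or non-list/tuple axes argument raises ValueError. A minimal standalone sketch of that behaviour, assuming a Paddle 2.x dygraph build with this patch applied (not part of the patch itself):

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(24, dtype=np.float32).reshape([2, 3, 4]))

# axes=[100] exceeds rank-1 and is clamped to the last axis (2),
# matching x_arr[:, :, 0:1] in the new test.
out = paddle.slice(x, axes=[100], starts=[0], ends=[1])
print(out.shape)  # [2, 3, 1]

# axes=(-100,) is below -rank and is clamped to axis 0, matching x_arr[0:1].
out = paddle.slice(x, axes=(-100, ), starts=[0], ends=[1])
print(out.shape)  # [1, 3, 4]

# An empty axes list (or a non-list/tuple such as 0) now raises ValueError up front.
try:
    paddle.slice(x, axes=[], starts=[0], ends=[1])
except ValueError as err:
    print("ValueError:", err)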