From 80b40c73439cf1c83df30123e57b980b95f124e2 Mon Sep 17 00:00:00 2001
From: YuHao
Date: Sat, 16 Mar 2024 02:32:51 +0000
Subject: [PATCH] Open ut

Signed-off-by: YuHao
---
 test/network_ops/test_upsample_scale_bicubic2d.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/test/network_ops/test_upsample_scale_bicubic2d.py b/test/network_ops/test_upsample_scale_bicubic2d.py
index a6c47cbdd3..a9b0d9e6bf 100644
--- a/test/network_ops/test_upsample_scale_bicubic2d.py
+++ b/test/network_ops/test_upsample_scale_bicubic2d.py
@@ -1,4 +1,3 @@
-import unittest
 import torch
 import numpy as np
 import torch_npu
@@ -43,7 +42,6 @@ class TestUpsampleBicubic2d(TestCase):
         return shape_format1
 
 
-    @unittest.skip("skip test_upsample_bicubic2d_scale_common_shape_format now")
     def test_upsample_bicubic2d_scale_common_shape_format(self):
         for item in self.create_scale_shape_format32():
             cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 255)
@@ -51,7 +49,6 @@
             npu_output = self.npu_op_scale_exec(npu_input1, item[1])
             self.assertRtolEqual(cpu_output, npu_output)
 
-    @unittest.skip("skip test_upsample_bicubic2d_float16_scale_shape_format now")
     def test_upsample_bicubic2d_float16_scale_shape_format(self):
         def cpu_op_exec_fp16(input1, size):
             input1 = input1.to(torch.float32)
-- 
Gitee
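
Note for reviewers: the two re-enabled cases follow the file's usual CPU-vs-NPU
comparison pattern for bicubic upsampling by scale factor. The sketch below is
a minimal, hypothetical reproduction of that pattern, not code from the patch:
the input shape, scale factor, and tolerances are illustrative stand-ins, and
torch.testing.assert_close stands in for the suite's assertRtolEqual helper.

    import torch
    import torch_npu  # registers the "npu" device; assumed available

    def bicubic_scale(x, scale):
        # F.interpolate with mode="bicubic" is the operator under test
        return torch.nn.functional.interpolate(
            x, scale_factor=scale, mode="bicubic", align_corners=False)

    cpu_in = torch.rand(1, 3, 8, 8) * 255  # mirrors create_common_tensor(..., 0, 255)
    npu_in = cpu_in.npu()                  # same data, moved to the NPU
    cpu_out = bicubic_scale(cpu_in, 2.0)
    npu_out = bicubic_scale(npu_in, 2.0).cpu()
    # relative-tolerance comparison, analogous to assertRtolEqual
    torch.testing.assert_close(cpu_out, npu_out, rtol=1e-3, atol=1e-3)

The float16 case visible in the second hunk upcasts the CPU input to float32
before running the reference computation, so the NPU half-precision result is
checked against a full-precision CPU baseline rather than CPU float16.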