PaddlePaddle Technical Notes

The preliminary steps are the same as in the earlier post on setting up the CUDA environment for PyTorch and TensorFlow on Ubuntu.

Installing the GPU version of PaddlePaddle

python -m pip install paddlepaddle-gpu==2.3.1.post116 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html
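
After installing, it is worth confirming that the GPU build actually works before going further. A minimal check, assuming a standard Paddle 2.x install, uses the built-in paddle.utils.run_check() and paddle.device.get_device():

import paddle

# Runs a short self-test program and reports whether PaddlePaddle
# can use the GPU (or falls back to CPU) on this machine.
paddle.utils.run_check()

# Prints the default device, e.g. 'gpu:0' for a working CUDA install.
print(paddle.device.get_device())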

Tensors

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([[1, 2], [3, 4]])
    print(a)
    print(a.shape)
    print(a.type)

    b = paddle.ones([2, 2])
    print(b)
    print(b.type)

    c = paddle.zeros([2, 2])
    print(c)
    print(c.type)

    d = paddle.eye(2, 2)
    print(d)
    print(d.type)

    e = paddle.zeros_like(a)
    print(e)
    print(e.type)

    f = paddle.ones_like(a)
    print(f)
    print(f.type)

    g = paddle.arange(0, 11, 1)
    print(g)
    print(g.type)

    h = paddle.linspace(2, 10, 4)
    print(h)

    i = paddle.rand([2, 2])
    print(i)

    j = paddle.normal(mean=0.0, std=paddle.rand([5]))
    print(j)

    k = paddle.uniform(shape=[2, 2])
    print(k)

    l = paddle.randperm(10)
    print(l)

Output

Tensor(shape=[2, 2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 2],
        [3, 4]])
[2, 2]
VarType.LOD_TENSOR
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1., 1.],
        [1., 1.]])
VarType.LOD_TENSOR
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0., 0.],
        [0., 0.]])
VarType.LOD_TENSOR
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1., 0.],
        [0., 1.]])
VarType.LOD_TENSOR
Tensor(shape=[2, 2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[0, 0],
        [0, 0]])
VarType.LOD_TENSOR
Tensor(shape=[2, 2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 1],
        [1, 1]])
VarType.LOD_TENSOR
Tensor(shape=[11], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10])
VarType.LOD_TENSOR
Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [2.        , 4.66666651, 7.33333349, 10.       ])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.17855753, 0.15026711],
        [0.54343289, 0.04870688]])
Tensor(shape=[5], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [-0.07493367, -0.10425358, -1.67506480,  0.02299307,  0.38065284])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[ 0.01213348, -0.30467188],
        [-0.81535292,  0.09958601]])
Tensor(shape=[10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [2, 9, 4, 5, 8, 7, 0, 1, 6, 3])
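
All of the creation ops above infer the dtype from their inputs and place the result on the current default device (the GPU here). When a specific dtype or CPU placement is needed, both can be passed explicitly; a small sketch using the standard dtype and place arguments of paddle.to_tensor:

import paddle

# Force float32 instead of the inferred int64.
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
print(x.dtype)       # paddle.float32

# Keep the tensor in CPU memory regardless of the default device.
x_cpu = paddle.to_tensor([[1, 2], [3, 4]], place=paddle.CPUPlace())
print(x_cpu.place)   # Place(cpu)
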
  • Arithmetic operations and matrix multiplication

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([[1., 2.], [3., 4.]])
    print(a)

    b = paddle.ones([2, 2])
    print(b)

    c = a + b
    print(c)

    c = paddle.add(a, b)
    print(c)

    d = paddle.subtract(a, b)
    print(d)

    e = paddle.to_tensor([2., 3.])
    f = a * e
    print(f)

    f = paddle.multiply(a, e)
    print(f)

    g = a / e
    print(g)

    g = paddle.divide(a, e)
    print(g)

    h = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
    i = paddle.to_tensor([[2, 4], [11, 13], [7, 9]], dtype='float32')
    j = paddle.mm(h, i)
    print(j)

    k = paddle.matmul(h, i)
    print(k)

Output

Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1., 2.],
        [3., 4.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1., 1.],
        [1., 1.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2., 3.],
        [4., 5.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2., 3.],
        [4., 5.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0., 1.],
        [2., 3.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2. , 6. ],
        [6. , 12.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2. , 6. ],
        [6. , 12.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.50000000, 0.66666669],
        [1.50000000, 1.33333337]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.50000000, 0.66666669],
        [1.50000000, 1.33333337]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[45. , 57. ],
        [105., 135.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[45. , 57. ],
        [105., 135.]])
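
Note that in the code above a has shape [2, 2] while e has shape [2]; element-wise ops follow NumPy-style broadcasting, so the row vector is applied to every row. A minimal illustration:

import paddle

a = paddle.ones([2, 3])
b = paddle.to_tensor([10., 20., 30.])   # shape [3], broadcast across both rows
print(paddle.add(a, b))                 # result has shape [2, 3]
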
  • Powers, square roots, and logarithms

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([1, 2, 3])
    c = paddle.pow(a, 2)
    print(c)

    c = a**2
    print(c)

    a = paddle.to_tensor([2.])
    c = paddle.exp(a)
    print(c)

    a = paddle.to_tensor([1, 2, 3], dtype='float32')
    c = paddle.sqrt(a)
    print(c)

    c = paddle.log2(a)
    print(c)
    c = paddle.log10(a)
    print(c)
    c = paddle.log(a)
    print(c)

Output

Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [1, 4, 9])
Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [1, 4, 9])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [7.38905621])
Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [1.        , 1.41421354, 1.73205078])
Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.        , 1.        , 1.58496249])
Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.        , 0.30103001, 0.47712126])
Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.        , 0.69314718, 1.09861231])
  • Rounding and modulo

import paddle

if __name__ == '__main__':

    a = paddle.rand([2, 2])
    b = paddle.multiply(a, paddle.to_tensor([10.]))
    print(b)

    print(paddle.floor(b))
    print(paddle.ceil(b))
    print(paddle.round(b))
    print(paddle.trunc(b))
    print(b % 2)

Output

Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2.33501291, 3.41357899],
        [6.85909081, 5.18760014]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2., 3.],
        [6., 5.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[3., 4.],
        [7., 6.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2., 3.],
        [7., 5.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[2., 3.],
        [6., 5.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.33501291, 1.41357899],
        [0.85909081, 1.18760014]])
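
Besides the % operator used above, remainder and integer division also exist as named ops; a short sketch with the standard paddle.mod and paddle.floor_divide:

import paddle

b = paddle.to_tensor([7, 10, 15])
d = paddle.to_tensor([2, 3, 4])
print(paddle.mod(b, d))           # element-wise remainder, same as b % d
print(paddle.floor_divide(b, d))  # element-wise integer (floor) division
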
  • Comparison operations

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
    b = paddle.to_tensor([[1, 4, 9], [6, 5, 7]])
    c = paddle.rand([2, 4])
    d = a
    print(a)
    print(b)

    print(paddle.equal(a, b))
    print(paddle.equal(a, d))
    print(paddle.greater_equal(a, b))
    print(paddle.greater_than(a, b))
    print(paddle.less_equal(a, b))
    print(paddle.less_than(a, b))
    print(paddle.not_equal(a, b))

Output

Tensor(shape=[2, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 2, 3],
        [4, 5, 6]])
Tensor(shape=[2, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 4, 9],
        [6, 5, 7]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[True , False, False],
        [False, True , False]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[True, True, True],
        [True, True, True]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[True , False, False],
        [False, True , False]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[False, False, False],
        [False, False, False]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[True, True, True],
        [True, True, True]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[False, True , True ],
        [True , False, True ]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[False, True , True ],
        [True , False, True ]])
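
The comparison ops return element-wise boolean tensors. To collapse a comparison into a single yes/no answer, or to compare floats with a tolerance, paddle.equal_all and paddle.allclose are the usual tools; a brief sketch:

import paddle

a = paddle.to_tensor([[1., 2.], [3., 4.]])
b = a + 1e-7

print(paddle.equal_all(a, a))   # single bool Tensor: True only if every element matches
print(paddle.allclose(a, b))    # True, the difference is within the default tolerances
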
  • Sorting

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([1, 4, 4, 3, 5])
    print(paddle.sort(a))
    print(paddle.sort(a, descending=True))

    b = paddle.to_tensor([[1, 4, 4, 3, 5], [2, 3, 1, 3, 5]])
    print(b.shape)
    print(paddle.sort(b))
    print(paddle.sort(b, axis=0))
    print(paddle.sort(b, descending=True))
    print(paddle.sort(b, axis=0, descending=True))

Output

Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [1, 3, 4, 4, 5])
Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [5, 4, 4, 3, 1])
[2, 5]
Tensor(shape=[2, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 3, 4, 4, 5],
        [1, 2, 3, 3, 5]])
Tensor(shape=[2, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 3, 1, 3, 5],
        [2, 4, 4, 3, 5]])
Tensor(shape=[2, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[5, 4, 4, 3, 1],
        [5, 3, 3, 2, 1]])
Tensor(shape=[2, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[2, 4, 4, 3, 5],
        [1, 3, 1, 3, 5]])
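
When the permutation matters more than the sorted values themselves, paddle.argsort returns the sorting indices; a small sketch that gathers them back to recover the sorted order:

import paddle

a = paddle.to_tensor([1, 4, 4, 3, 5])
idx = paddle.argsort(a)          # indices that would sort a in ascending order
print(idx)
print(paddle.gather(a, idx))     # same values as paddle.sort(a)
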
  • Top-K

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([[1, 4, 4, 3, 5], [2, 3, 1, 3, 6]])
    print(paddle.topk(a, k=1, axis=0))
    print(paddle.topk(a, k=2, axis=0))
    print(paddle.topk(a, k=2, axis=1))

Output

(Tensor(shape=[1, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[2, 4, 4, 3, 6]]), Tensor(shape=[1, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 0, 0, 0, 1]]))
(Tensor(shape=[2, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[2, 4, 4, 3, 6],
        [1, 3, 1, 3, 5]]), Tensor(shape=[2, 5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[1, 0, 0, 0, 1],
        [0, 1, 1, 1, 0]]))
(Tensor(shape=[2, 2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[5, 4],
        [6, 3]]), Tensor(shape=[2, 2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[4, 1],
        [4, 1]]))
  • k-th smallest value

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([[1, 4, 4, 3, 5], [2, 3, 1, 3, 6], [4, 5, 6, 7, 8]])
    print(paddle.kthvalue(a, k=2, axis=0))
    print(paddle.kthvalue(a, k=2, axis=1))

Output

(Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [2, 4, 4, 3, 6]), Tensor(shape=[5], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [1, 0, 0, 1, 1]))
(Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [3, 2, 5]), Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [3, 0, 1]))
  • Checking for invalid values (inf/NaN)

import paddle
import numpy as np

if __name__ == '__main__':

    a = paddle.rand([2, 3])
    b = paddle.to_tensor([1, 2, np.nan])
    print(a)
    print(paddle.isfinite(a))
    print(paddle.isinf(a))
    print(paddle.isnan(a))
    print(paddle.isnan(b))

Output

Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.08867172, 0.27258149, 0.78055871],
        [0.34912518, 0.62152320, 0.54573017]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[True, True, True],
        [True, True, True]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[False, False, False],
        [False, False, False]])
Tensor(shape=[2, 3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[False, False, False],
        [False, False, False]])
Tensor(shape=[3], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [False, False, True ])
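
Once NaNs have been located, a common follow-up is replacing them. One way that uses only ops already shown is paddle.where with the isnan mask, as in this sketch:

import paddle
import numpy as np

b = paddle.to_tensor([1., 2., np.nan])
# Keep finite entries and substitute 0. at every NaN position.
cleaned = paddle.where(paddle.isnan(b), paddle.zeros_like(b), b)
print(cleaned)   # [1., 2., 0.]
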
  • Trigonometric functions

import paddle

if __name__ == '__main__':

    a = paddle.to_tensor([0, 0, 0], dtype='float32')
    print(paddle.cos(a))

Output

Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [1., 1., 1.])
  • Statistical functions

import paddle

if __name__ == '__main__':

    a = paddle.rand([2, 2])
    print(a)
    print(paddle.mean(a))
    print(paddle.mean(a, axis=0))
    print(paddle.sum(a))
    print(paddle.sum(a, axis=0))
    print(paddle.prod(a))
    print(paddle.prod(a, axis=0))
    print(paddle.argmax(a, axis=0))
    print(paddle.argmin(a, axis=0))
    print(paddle.std(a))
    print(paddle.var(a))
    print(paddle.median(a))
    print(paddle.mode(a))
    a = paddle.rand([2, 2]) * 10
    print(a)
    print(paddle.histogram(a, 6, 0, 0))
    a = paddle.randint(0, 10, [10])
    print(a)
    print(paddle.bincount(a))

Output

Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.28592348, 0.81242460],
        [0.54838538, 0.11063743]])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.43934274])
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.41715443, 0.46153101])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [1.75737095])
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.83430886, 0.92306203])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.01409356])
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.15679626, 0.08988457])
Tensor(shape=[2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [1, 0])
Tensor(shape=[2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [0, 1])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.30695549])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.09422167])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.41715443])
(Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.28592348, 0.11063743]), Tensor(shape=[2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [0, 1]))
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[7.70743370, 5.53660393],
        [7.40494251, 3.98108697]])
Tensor(shape=[6], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [1, 0, 1, 0, 0, 2])
Tensor(shape=[10], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [0, 4, 7, 0, 3, 2, 6, 2, 1, 2])
Tensor(shape=[8], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [2, 1, 3, 1, 1, 0, 1, 1])
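
Two more reductions that often appear alongside the ones above are running sums and plain max/min; a short sketch with paddle.cumsum, paddle.max and paddle.min:

import paddle

a = paddle.to_tensor([1., 2., 3., 4.])
print(paddle.cumsum(a))              # running sums: [1., 3., 6., 10.]
print(paddle.max(a), paddle.min(a))  # largest and smallest element
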
  • Random sampling

import paddle

if __name__ == '__main__':

    paddle.seed(1)
    mean = paddle.rand([1, 2])
    std = paddle.rand([1, 2])
    print(paddle.normal(mean, std))

Output

Tensor(shape=[1, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[ 1.11346483, -0.69872946]])
  • Norm and distance operations

import paddle

if __name__ == '__main__':

    a = paddle.rand([2, 1])
    b = paddle.rand([2, 1])
    print(a)
    print(b)
    print(paddle.dist(a, b, p=1))
    print(paddle.dist(a, b, p=2))
    print(paddle.dist(a, b, p=3))
    print(paddle.norm(a))
    print(paddle.norm(a, p=3))
    print(paddle.norm(a, p='fro'))

Output

Tensor(shape=[2, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.25732645],
        [0.40564528]])
Tensor(shape=[2, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.35750133],
        [0.94703859]])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.64156818])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.55058300])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.54253405])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.48038006])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.43758231])
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [0.48038006])
  • Tensor clipping

import paddle

if __name__ == '__main__':

    a = paddle.rand([2, 2]) * 10
    print(a)
    a = paddle.clip(a, 2, 5)
    print(a)

Output

Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[4.89272022, 6.48443699],
        [0.27107078, 4.85858250]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[4.89272022, 5.        ],
        [2.        , 4.85858250]])
  • Tensor indexing and data selection

import paddle

if __name__ == '__main__':

    a = paddle.rand([4, 4])
    b = paddle.rand([4, 4])
    print(a)
    print(b)
    out = paddle.where(a > 0.5, a, b)
    print(out)
    out = paddle.where(a > b)
    print(out)
    out = paddle.index_select(a, axis=0, index=paddle.to_tensor([0, 3, 2]))
    print(out)
    out = paddle.index_select(a, axis=1, index=paddle.to_tensor([0, 3, 2]))
    print(out)

    a = paddle.linspace(1, 16, 16)
    a = paddle.reshape(a, (4, 4))
    print(a)
    out = paddle.gather(a, index=paddle.to_tensor([0, 1, 3]), axis=0)
    print(out)
    out = paddle.gather(a, index=paddle.to_tensor([0, 1, 3]), axis=1)
    print(out)
    mask = paddle.greater_than(a, paddle.to_tensor([8.]))
    print(mask)
    out = paddle.masked_select(a, mask)
    print(out)
    a = paddle.flatten(a)
    out = paddle.take_along_axis(a, indices=paddle.to_tensor([0, 15, 13, 10]), axis=0)
    print(out)
    a = paddle.to_tensor([[0, 1, 2, 0], [2, 3, 0, 1]])
    out = paddle.nonzero(a)
    print(out)

Output

Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.35779023, 0.89277714, 0.24702056, 0.92913544],
        [0.29648149, 0.45815185, 0.44784531, 0.94065309],
        [0.26437962, 0.86828750, 0.10525739, 0.87954575],
        [0.55159646, 0.11356149, 0.72669047, 0.07444657]])
Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.21640024, 0.85572416, 0.66002953, 0.28534794],
        [0.03093199, 0.11802873, 0.36485839, 0.07965848],
        [0.19432747, 0.38168678, 0.40194315, 0.19759925],
        [0.31319368, 0.17183183, 0.49453658, 0.77549160]])
Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.21640024, 0.89277714, 0.66002953, 0.92913544],
        [0.03093199, 0.11802873, 0.36485839, 0.94065309],
        [0.19432747, 0.86828750, 0.40194315, 0.87954575],
        [0.55159646, 0.17183183, 0.72669047, 0.77549160]])
(Tensor(shape=[12, 1], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[0],
        [0],
        [0],
        [1],
        [1],
        [1],
        [1],
        [2],
        [2],
        [2],
        [3],
        [3]]), Tensor(shape=[12, 1], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[0],
        [1],
        [3],
        [0],
        [1],
        [2],
        [3],
        [0],
        [1],
        [3],
        [0],
        [2]]))
Tensor(shape=[3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.35779023, 0.89277714, 0.24702056, 0.92913544],
        [0.55159646, 0.11356149, 0.72669047, 0.07444657],
        [0.26437962, 0.86828750, 0.10525739, 0.87954575]])
Tensor(shape=[4, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.35779023, 0.92913544, 0.24702056],
        [0.29648149, 0.94065309, 0.44784531],
        [0.26437962, 0.87954575, 0.10525739],
        [0.55159646, 0.07444657, 0.72669047]])
Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1. , 2. , 3. , 4. ],
        [5. , 6. , 7. , 8. ],
        [9. , 10., 11., 12.],
        [13., 14., 15., 16.]])
Tensor(shape=[3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1. , 2. , 3. , 4. ],
        [5. , 6. , 7. , 8. ],
        [13., 14., 15., 16.]])
Tensor(shape=[4, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1. , 2. , 4. ],
        [5. , 6. , 8. ],
        [9. , 10., 12.],
        [13., 14., 16.]])
Tensor(shape=[4, 4], dtype=bool, place=Place(gpu:0), stop_gradient=True,
       [[False, False, False, False],
        [False, False, False, False],
        [True , True , True , True ],
        [True , True , True , True ]])
Tensor(shape=[8], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [9. , 10., 11., 12., 13., 14., 15., 16.])
Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [1. , 16., 14., 11.])
Tensor(shape=[5, 2], dtype=int64, place=Place(gpu:0), stop_gradient=True,
       [[0, 1],
        [0, 2],
        [1, 0],
        [1, 1],
        [1, 3]])

  • Combining and concatenating tensors

import paddle

if __name__ == '__main__':

    a = paddle.zeros([2, 4])
    b = paddle.ones([2, 4])
    out = paddle.concat((a, b), axis=0)
    print(out)

    a = paddle.linspace(1, 6, 6)
    a = paddle.reshape(a, (2, 3))
    b = paddle.linspace(7, 12, 6)
    b = paddle.reshape(b, (2, 3))
    print(a)
    print(b)
    out = paddle.stack((a, b), axis=1)
    print(out)
    print(out.shape)
    print(out[:, 0, :])
    print(out[:, 1, :])

Output

Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0., 0., 0., 0.],
        [0., 0., 0., 0.],
        [1., 1., 1., 1.],
        [1., 1., 1., 1.]])
Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1., 2., 3.],
        [4., 5., 6.]])
Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[7. , 8. , 9. ],
        [10., 11., 12.]])
Tensor(shape=[2, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[1. , 2. , 3. ],
         [7. , 8. , 9. ]],

        [[4. , 5. , 6. ],
         [10., 11., 12.]]])
[2, 2, 3]
Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[1., 2., 3.],
        [4., 5., 6.]])
Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[7. , 8. , 9. ],
        [10., 11., 12.]])

  • Tensor splitting

import paddle

if __name__ == '__main__':

    a = paddle.rand([3, 4])
    print(a)
    out = paddle.chunk(a, (2, 1), axis=0)
    print(out)
    out = paddle.chunk(a, 2, axis=1)
    print(out)
    out = paddle.split(a, (2, 1), axis=0)
    print(out)
    out = paddle.split(a, 2, axis=1)
    print(out)
    out = paddle.split(a, (1, 1, 1), axis=0)
    print(out)

Output

Tensor(shape=[3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.72375304, 0.28191790, 0.45890489, 0.79828680],
        [0.10114241, 0.24494733, 0.85273385, 0.31621015],
        [0.78064203, 0.37038296, 0.75661004, 0.32411623]])
[Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.72375304, 0.28191790, 0.45890489, 0.79828680],
        [0.10114241, 0.24494733, 0.85273385, 0.31621015]]), Tensor(shape=[1, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.78064203, 0.37038296, 0.75661004, 0.32411623]])]
[Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.72375304, 0.28191790],
        [0.10114241, 0.24494733],
        [0.78064203, 0.37038296]]), Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.45890489, 0.79828680],
        [0.85273385, 0.31621015],
        [0.75661004, 0.32411623]])]
[Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.72375304, 0.28191790, 0.45890489, 0.79828680],
        [0.10114241, 0.24494733, 0.85273385, 0.31621015]]), Tensor(shape=[1, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.78064203, 0.37038296, 0.75661004, 0.32411623]])]
[Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.72375304, 0.28191790],
        [0.10114241, 0.24494733],
        [0.78064203, 0.37038296]]), Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.45890489, 0.79828680],
        [0.85273385, 0.31621015],
        [0.75661004, 0.32411623]])]
[Tensor(shape=[1, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.72375304, 0.28191790, 0.45890489, 0.79828680]]), Tensor(shape=[1, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.10114241, 0.24494733, 0.85273385, 0.31621015]]), Tensor(shape=[1, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.78064203, 0.37038296, 0.75661004, 0.32411623]])]
  • Tensor reshaping

import paddle

if __name__ == '__main__':

    a = paddle.rand([2, 3])
    print(a)
    out = paddle.reshape(a, (3, 2))
    print(out)
    print(paddle.t(out))

    a = paddle.rand([1, 2, 3])
    print(a)
    out = paddle.transpose(a, (1, 0, 2))
    print(out)
    out = paddle.squeeze(a)
    print(out)
    out = paddle.unsqueeze(a, -1)
    print(out)
    out = paddle.unbind(a, axis=1)
    print(out)
    out = paddle.flip(a, axis=1)
    print(out)
    out = paddle.flip(a, axis=2)
    print(out)
    out = paddle.flip(a, axis=[1, 2])
    print(out)
    out = paddle.rot90(a)
    print(out)
    out = paddle.rot90(a, -1)
    print(out)

Output

Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.57933182, 0.92746025, 0.43314070],
        [0.13385081, 0.11243574, 0.38549340]])
Tensor(shape=[3, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.57933182, 0.92746025],
        [0.43314070, 0.13385081],
        [0.11243574, 0.38549340]])
Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.57933182, 0.43314070, 0.11243574],
        [0.92746025, 0.13385081, 0.38549340]])
Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.31357428, 0.54367834, 0.89613014],
         [0.09769047, 0.61672699, 0.02827156]]])
Tensor(shape=[2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.31357428, 0.54367834, 0.89613014]],

        [[0.09769047, 0.61672699, 0.02827156]]])
Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.31357428, 0.54367834, 0.89613014],
        [0.09769047, 0.61672699, 0.02827156]])
Tensor(shape=[1, 2, 3, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[[0.31357428],
          [0.54367834],
          [0.89613014]],

         [[0.09769047],
          [0.61672699],
          [0.02827156]]]])
[Tensor(shape=[1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.31357428, 0.54367834, 0.89613014]]), Tensor(shape=[1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[0.09769047, 0.61672699, 0.02827156]])]
Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.09769047, 0.61672699, 0.02827156],
         [0.31357428, 0.54367834, 0.89613014]]])
Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.89613014, 0.54367834, 0.31357428],
         [0.02827156, 0.61672699, 0.09769047]]])
Tensor(shape=[1, 2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.02827156, 0.61672699, 0.09769047],
         [0.89613014, 0.54367834, 0.31357428]]])
Tensor(shape=[2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.09769047, 0.61672699, 0.02827156]],

        [[0.31357428, 0.54367834, 0.89613014]]])
Tensor(shape=[2, 1, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[[0.31357428, 0.54367834, 0.89613014]],

        [[0.09769047, 0.61672699, 0.02827156]]])
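
Closely related to reshaping is repeating data: paddle.expand broadcasts singleton dimensions to a larger shape, while paddle.tile repeats the data a given number of times along each axis. A brief sketch:

import paddle

a = paddle.to_tensor([[1., 2., 3.]])           # shape [1, 3]
print(paddle.expand(a, shape=[2, 3]))          # broadcast the singleton row
print(paddle.tile(a, repeat_times=[2, 1]))     # repeat the row twice
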
  • Filling tensors

import paddle

if __name__ == '__main__':

    a = paddle.full((2, 3), 10)
    print(a)

Output

Tensor(shape=[2, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
       [[10., 10., 10.],
        [10., 10., 10.]])
  • Computing derivatives (autograd)

import paddle

if __name__ == '__main__':

    x = paddle.ones([2, 2])
    x.stop_gradient = False
    y = x + 2
    print(y)
    y.backward()
    print(x.grad)

    x = paddle.ones([2, 2])
    x.stop_gradient = False
    y = x + 2
    z = y**2 * 3
    z.backward()
    print(x.grad)

Output

Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
       [[3., 3.],
        [3., 3.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
       [[1., 1.],
        [1., 1.]])
Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
       [[18., 18.],
        [18., 18.]])
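
When a derivative is needed without accumulating anything into .grad, paddle.grad computes it directly from outputs and inputs; a minimal sketch for d(x**2)/dx at x = 2:

import paddle

x = paddle.to_tensor([2.], stop_gradient=False)
y = x ** 2
# Returns a list containing dy/dx; no backward() call, and x.grad is left untouched.
(dx,) = paddle.grad(outputs=y, inputs=x)
print(dx)   # [4.]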

Building Neural Networks

Boston housing price prediction

import paddle
from sklearn import datasets

if __name__ == '__main__':

    boston = datasets.load_boston()
    X = paddle.to_tensor(boston.data)
    y = paddle.to_tensor(boston.target)
    y = paddle.unsqueeze(y, -1)
    data = paddle.concat((X, y), axis=-1)
    # print(data)

    y = paddle.squeeze(y)
    train_data = data[:496]
    X_test = X[496:]
    y_test = y[496:]

    class Net(paddle.nn.Layer):

        def __init__(self, n_feature, n_output):
            super(Net, self).__init__()
            self.hidden = paddle.nn.Linear(n_feature, 100)
            self.relu = paddle.nn.ReLU()
            self.predict = paddle.nn.Linear(100, n_output)

        def forward(self, x):
            out = self.hidden(x)
            out = self.relu(out)
            out = self.predict(out)
            return out

    net = Net(13, 1)
    train_loader = paddle.io.DataLoader(train_data, batch_size=10, shuffle=True)
    loss_func = paddle.nn.MSELoss()
    optimizer = paddle.optimizer.Adam(learning_rate=0.01, parameters=net.parameters())

    EPOCH_NUM = 1000
    for epoch in range(EPOCH_NUM):
        for batch_id, data in enumerate(train_loader):
            X_train = data[:, :13].astype('float32')
            y_train = data[:, 13:].astype('float32')
            predict = net(X_train)
            loss = loss_func(predict, y_train) * 0.001
            if batch_id % 20 == 0:
                print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, loss.numpy()))
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

        predict = net(X_test.astype('float32'))
        loss_test = loss_func(predict, y_test.astype('float32')) * 0.001
        print("epoch: {}, test_loss is: {}".format(epoch, loss_test.numpy()))

Output (partial)

epoch: 999, batch_id: 0, loss is: [0.01588636]
epoch: 999, batch_id: 20, loss is: [0.02083369]
epoch: 999, batch_id: 40, loss is: [0.00961253]
epoch: 999, test_loss is: [0.02289972]
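
To reuse the trained network later, the usual pattern is to persist its state dict with paddle.save and restore it into a freshly constructed Net. A minimal sketch continuing the script above (net, Net and X_test are the objects defined there; the file name boston_net.pdparams is arbitrary):

    # Persist the learned weights of the network trained above.
    paddle.save(net.state_dict(), 'boston_net.pdparams')

    # Rebuild the same architecture and load the weights back for inference.
    net2 = Net(13, 1)
    net2.set_state_dict(paddle.load('boston_net.pdparams'))
    predict = net2(X_test.astype('float32'))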

Handwritten digit recognition

import paddle
import paddle.nn as nn
import paddle.dataset.mnist as mnist

if __name__ == '__main__':

    train_reader = paddle.batch(mnist.train(), batch_size=4)
    test_reader = paddle.batch(mnist.test(), batch_size=4)

    class CNN(nn.Layer):

        def __init__(self):
            super(CNN, self).__init__()
            self.conv = nn.Sequential(
                nn.Conv2D(1, 32, 5, stride=1, padding=2),
                nn.BatchNorm2D(32),
                nn.ReLU(),
                nn.MaxPool2D(2)
            )
            self.fc = nn.Linear(14 * 14 * 32, 10)

        def forward(self, x):
            out = self.conv(x)
            out = paddle.reshape(out, (out.shape[0], -1))
            out = self.fc(out)
            return out

    cnn = CNN()
    loss_func = nn.CrossEntropyLoss()
    optimizer = paddle.optimizer.Adam(learning_rate=0.01, parameters=cnn.parameters())

    EPOCH_NUM = 5
    best_acc = 0
    for epoch in range(EPOCH_NUM):
        for batch_id, data in enumerate(train_reader()):
            images = paddle.to_tensor(data[0][0], dtype='float32')
            images = paddle.unsqueeze(images, 0)
            for i in range(1, len(data)):
                tmp = paddle.to_tensor(data[i][0], dtype='float32')
                tmp = paddle.unsqueeze(tmp, 0)
                images = paddle.concat((images, tmp), axis=0)
            images = paddle.reshape(images, (4, 1, 28, 28))
            predict = cnn(images)
            labels = paddle.to_tensor(data[0][1])
            labels = paddle.unsqueeze(labels, 0)
            for i in range(1, len(data)):
                tmp = paddle.to_tensor(data[i][1])
                tmp = paddle.unsqueeze(tmp, 0)
                labels = paddle.concat((labels, tmp), axis=0)
            loss = loss_func(predict, labels)
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            print("epoch is {}, batch_id is {}, loss is {}".format(epoch   1, batch_id, loss.item()))
        loss_test = 0
        accuracy = 0
        total = 0
        for batch_id, data in enumerate(test_reader()):
            images = paddle.to_tensor(data[0][0], dtype='float32')
            images = paddle.unsqueeze(images, 0)
            for i in range(1, len(data)):
                tmp = paddle.to_tensor(data[i][0], dtype='float32')
                tmp = paddle.unsqueeze(tmp, 0)
                images = paddle.concat((images, tmp), axis=0)
            images = paddle.reshape(images, (4, 1, 28, 28))
            predict = cnn(images)
            labels = paddle.to_tensor(data[0][1])
            labels = paddle.unsqueeze(labels, 0)
            for i in range(1, len(data)):
                tmp = paddle.to_tensor(data[i][1])
                tmp = paddle.unsqueeze(tmp, 0)
                labels = paddle.concat((labels, tmp), axis=0)
            loss_test += loss_func(predict, labels)
            pred = paddle.argmax(predict, axis=1)
            accuracy += (pred == paddle.squeeze(labels)).sum().item()
            total = batch_id
        total *= 4
        accuracy = accuracy / total
        if accuracy > best_acc:
            best_acc = accuracy
        loss_test = loss_test / (total // 4)
        print("epoch is {}, accuracy is {}, loss test is {}, best_acc is {}".format(epoch   1, accuracy, loss_test.item(), best_acc))

Output (partial)

epoch is 5, batch_id is 14997, loss is 0.0013393799308687449
epoch is 5, batch_id is 14998, loss is 0.0260640699416399
epoch is 5, batch_id is 14999, loss is 0.00019078730838373303
epoch is 5, accuracy is 0.9109643857543017, loss test is 0.30904391407966614, best_acc is 0.970188075230092
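
The manual unsqueeze/concat batching above works but is verbose, and paddle.dataset.mnist together with paddle.batch are legacy APIs. An equivalent way to feed the same CNN, sketched here with the standard paddle.vision.datasets.MNIST and paddle.io.DataLoader (shown only as an alternative, not what the original code uses):

import paddle
from paddle.vision.transforms import ToTensor

# Each sample arrives as a [1, 28, 28] float tensor, so the manual
# unsqueeze/concat/reshape steps above are no longer needed.
train_set = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor())
train_loader = paddle.io.DataLoader(train_set, batch_size=4, shuffle=True)

for images, labels in train_loader():
    print(images.shape)   # [4, 1, 28, 28]
    break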
