main_chongxie_ants()
main_chongxie_bees()

# Help:
# help(target_dir.split)
# "Return a list of the words in the string, using sep as the delimiter string."
def add_scalar(
    self,
    tag,
    scalar_value,
    global_step=None,
    walltime=None,
    new_style=False,
    double_precision=False,
):
    """Add scalar data to summary.

    Args:
        tag (str): Data identifier    # the title of the chart
        scalar_value (float or string/blobname): Value to save    # the Y-axis value
        global_step (int): Global step value to record    # the X-axis value
        walltime (float): Optional override default walltime (time.time())
          with seconds after epoch of event
        new_style (boolean): Whether to use new style (tensor field) or old
          style (simple_value field). New style could lead to faster data loading.
    Examples::

        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter()
        x = range(100)
        for i in x:
            writer.add_scalar('y=2x', i * 2, i)
        writer.close()
    Expected result:

    .. image:: _static/img/tensorboard/add_scalar.png
       :scale: 50 %
    """
Example
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")
# y = x
for i in range(100):
    writer.add_scalar("y=x", i, i)
writer.close()
def add_image(
    self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"
):
    """Add image data to summary.

    Note that this requires the ``pillow`` package.

    Args:
        tag (str): Data identifier    # the title of the image
        img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
          # the image tensor; note that the accepted types are restricted to
          # exactly these (torch.Tensor, numpy.ndarray, or string/blobname)
        global_step (int): Global step value to record
        walltime (float): Optional override default walltime (time.time())
          seconds after epoch of event
        dataformats (str): Image data format specification of the form
          CHW, HWC, HW, WH, etc.
    Shape:
        img_tensor: Default is :math:`(3, H, W)`. You can use
        ``torchvision.utils.make_grid()`` to convert a batch of tensor into
        3xHxW format or call ``add_images`` and let us do the job.
        Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is
        also suitable as long as corresponding ``dataformats`` argument is
        passed, e.g. ``CHW``, ``HWC``, ``HW``.

    Examples::

        from torch.utils.tensorboard import SummaryWriter
        import numpy as np
        img = np.zeros((3, 100, 100))
        img[0] = np.arange(0, 10000).reshape(100, 100) / 10000
        img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000

        img_HWC = np.zeros((100, 100, 3))
        img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000
        img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000

        writer = SummaryWriter()
        writer.add_image('my_image', img, 0)

        # If you have non-default dimension setting, set the dataformats argument.
        writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC')
        writer.close()

    Expected result:

    .. image:: _static/img/tensorboard/add_image.png
       :scale: 50 %
    """
Example
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname)
# An easy-to-miss point: add_image assumes the image shape is (3, H, W) by
# default, i.e. (3 channels, height, width).
'''Shape:
    img_tensor: Default is :math:`(3, H, W)`. You can use
    ``torchvision.utils.make_grid()`` to convert a batch of tensor into
    3xHxW format or call ``add_images`` and let us do the job.
    Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is also
    suitable as long as corresponding ``dataformats`` argument is passed,
    e.g. ``CHW``, ``HWC``, ``HW``.'''
class ToTensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform
    does not support torchscript.
    # covers the common image types: PIL images, numpy ndarrays, etc.

    Converts a PIL Image or numpy.ndarray (H x W x C) in the range [0, 255]
    to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] if
    the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr,
    RGBA, CMYK, 1) or if the numpy.ndarray has dtype = np.uint8

    In the other cases, tensors are returned without scaling.

    .. note::
        Because the input image is scaled to [0.0, 1.0], this transformation
        should not be used when transforming target image masks. See the
        `references`_ for implementing the transforms for image masks.

    .. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
    """
PS: the img_tensor parameter of add_image accepts exactly these types: (torch.Tensor, numpy.ndarray, or string/blobname).
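Since add_image does not take a raw PIL image directly, a common pattern is to convert with numpy first and declare the layout. A minimal sketch (the path reuses the ants dataset path from later in these notes; the "logs" writer directory is an assumption):

import numpy as np
from PIL import Image
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")
img = Image.open("6tuduidata/train/ants_image/0013035.jpg")  # placeholder path
img_array = np.array(img)        # PIL -> numpy.ndarray, shape (H, W, C)
print(img_array.shape)           # e.g. (512, 768, 3)

# The default dataformats is "CHW"; for an (H, W, C) array we must say so:
writer.add_image("test", img_array, 1, dataformats="HWC")
writer.close()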
def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"):
    """Add image data to summary.

    Note that this requires the ``pillow`` package.

    Args:
        tag (str): Data identifier
        img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
        global_step (int): Global step value to record
        walltime (float): Optional override default walltime (time.time())
          seconds after epoch of event
        dataformats (str): Image data format specification of the form
          CHW, HWC, HW, WH, etc.
    """
class Compose:
    """Composes several transforms together. This transform does not support
    torchscript. Please, see the note below.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.PILToTensor(),
        >>>     transforms.ConvertImageDtype(torch.float),
        >>> ])

    .. note::
        In order to script the transformations, please use ``torch.nn.Sequential`` as below.

        >>> transforms = torch.nn.Sequential(
        >>>     transforms.CenterCrop(10),
        >>>     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        >>> )
        >>> scripted_transforms = torch.jit.script(transforms)

        Make sure to use only scriptable transformations, i.e. that work with
        ``torch.Tensor``, does not require `lambda` functions or ``PIL.Image``.
    """
class Normalize(torch.nn.Module):
    """Normalize a tensor image with mean and standard deviation.
    This transform does not support PIL Image.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for
    ``n`` channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``   # the formula

    .. note::
        This transform acts out of place, i.e., it does not mutate the input
        tensor.  # it returns a new tensor instead of modifying the input

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool,optional): Bool to make this operation in-place.
    """
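The code below refers to an img_norm that is not defined in this extract; a minimal sketch of how it could be produced, assuming the ants image from earlier and placeholder means/stds of 0.5 per channel:

from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

writer = SummaryWriter("logs")
img = Image.open("6tuduidata/train/ants_image/0013035.jpg")

trans_totensor = transforms.ToTensor()
img_tensor = trans_totensor(img)    # values scaled to [0.0, 1.0]

# output[channel] = (input[channel] - mean[channel]) / std[channel]
trans_norm = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
img_norm = trans_norm(img_tensor)   # values now in [-1.0, 1.0]
print(img_tensor[0][0][0], img_norm[0][0][0])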
# We can also use TensorBoard to check what the normalized image looks like.
# "This transform acts out of place, i.e., it does not mutate the input tensor."
# (Key point: normalization returns a new tensor and leaves the input type
#  intact, so we can simply keep writing images out.)
writer.add_image("norm", img_norm, 2)
writer.close()
# Run this to create the event file, then open it from the terminal with
# tensorboard -- the normalized ant image looks rather demonic.
# We can then tweak a few mean/std values and use the step parameter of
# writer.add_image("norm", img_norm, step) to write one event per setting and
# browse the results step by step.
# If the image is not 3-channel RGB (e.g. RGBA or grayscale), convert it with
# img = Image.open(img_path).convert('RGB').
class Resize(torch.nn.Module):
    """Resize the input image to the given size.
    If the image is torch Tensor, it is expected to have [..., H, W] shape,
    where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when
        downsampling, the interpolation of PIL images and tensors is slightly
        different, because PIL applies antialiasing. This may lead to
        significant differences in the performance of a network. Therefore,
        it is preferable to train and serve a model with the same input
        types. See also below the ``antialias`` parameter, which can help
        making the output of PIL images and tensors closer.

    Args:
        size (sequence or int): Desired output size. If size is a sequence
            like (h, w), output size will be matched to this. If size is an
            int, smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size).

            .. note::
                In torchscript mode size as single int is not supported, use
                a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined
            by :class:`torchvision.transforms.InterpolationMode`. Default is
            ``InterpolationMode.BILINEAR``. If input is Tensor, only
            ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
            ``InterpolationMode.BICUBIC`` are supported. For backward
            compatibility integer values (e.g.
            ``PIL.Image[.Resampling].NEAREST``) are still accepted, but
            deprecated since 0.13 and will be removed in 0.15. Please use
            InterpolationMode enum.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image,
            the flag is ignored and anti-alias is always used. If ``img`` is
            Tensor, the flag is False by default and can be set to True for
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC``
            modes. This can help making the output for PIL images and tensors
            closer.
    """
Example
Below is a simple example showing how to use transforms.Resize() to rescale an image:
import torchvision.transforms as transforms
from PIL import Image
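The body of this example did not survive extraction; a plausible minimal sketch (the file name example.jpg and the 256x256 target size are placeholders):

# Load an image and rescale it to a fixed 256x256 size
img = Image.open("example.jpg")              # placeholder file name
resize = transforms.Resize((256, 256))
img_resized = resize(img)                    # still a PIL Image
print(img.size, "->", img_resized.size)      # e.g. (768, 512) -> (256, 256)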
class RandomCrop(torch.nn.Module):
    """Crop the given image at a random location.
    If the image is torch Tensor, it is expected to have [..., H, W] shape,
    where ... means an arbitrary number of leading dimensions, but if
    non-constant padding is used, the input is expected to have at most 2
    leading dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is
            an int instead of sequence like (h, w), a square crop
            (size, size) is made. If provided a sequence of length 1, it will
            be interpreted as (size[0], size[0]).
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is None. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided
            this is the padding on left/right and top/bottom respectively. If
            a sequence of length 4 is provided this is the padding for the
            left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported,
                use a sequence of length 1: ``[padding, ]``.
        pad_if_needed (boolean): It will pad the image if smaller than the
            desired size to avoid raising an exception. Since cropping is
            done after padding, the padding seems to be done at a random
            offset.
        fill (number or tuple): Pixel fill value for constant fill. Default
            is 0. If a tuple of length 3, it is used to fill R, G, B channels
            respectively. This value is only used when the padding_mode is
            constant. Only number is supported for torch Tensor. Only int or
            tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge,
            reflect or symmetric. Default is constant.

            - constant: pads with a constant value, this value is specified
              with fill
            - edge: pads with the last value at the edge of the image. If
              input a 5D torch Tensor, the last 3 dimensions will be padded
              instead of the last 2
            - reflect: pads with reflection of image without repeating the
              last value on the edge. For example, padding [1, 2, 3, 4] with
              2 elements on both sides in reflect mode will result in
              [3, 2, 1, 2, 3, 4, 3, 2]
            - symmetric: pads with reflection of image repeating the last
              value on the edge. For example, padding [1, 2, 3, 4] with 2
              elements on both sides in symmetric mode will result in
              [2, 1, 1, 2, 3, 4, 4, 3]
    """
# _*_coding:utf-8_*_
# Common transforms.
# In this section we mainly study Resize (changing an image's height and
# width, i.e. its size), Compose (chaining transforms into a pipeline), and
# RandomCrop (random cropping).
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

img_path = "6tuduidata/train/ants_image/0013035.jpg"
img = Image.open(img_path)

# Studying Resize():
'''
class Resize(torch.nn.Module):
    """Resize the input image to the given size.
    If the image is torch Tensor, it is expected    (key point: tensor input is supported)
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions    # for an RGB image that leading dimension is 3

    Args:
        size (sequence or int): Desired output size. If size is a sequence
            like (h, w), output size will be matched to this. If size is an
            int, smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size).
            In torchscript mode size as single int is not supported, use a
            sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined
            by :class:`torchvision.transforms.InterpolationMode`. Default is
            ``InterpolationMode.BILINEAR``. If input is Tensor, only
            ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
            ``InterpolationMode.BICUBIC`` are supported. For backward
            compatibility integer values (e.g. ``PIL.Image.NEAREST``) are
            still acceptable.    (so PIL input from earlier versions is also allowed)

sequence: in Python, a list is written as [item1, item2, ...].
In Compose, the items must be transforms, so we get
Compose([transform1, transform2, ...]).
'''
# First, instantiate a callable from the class template
trans_resize = transforms.Resize([512, 512])
# then apply it to the PIL image
img_resize = trans_resize(img)
# Next, random cropping with RandomCrop.
'''
class RandomCrop(torch.nn.Module):
    """Crop the given image at a random location.
    If the image is torch Tensor, it is expected    (key point: tensor input is supported)
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions, but if non-constant padding is used, the input is expected to
    have at most 2 leading dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is
            an int instead of sequence like (h, w), a square crop
            (size, size) is made.
            (a single int is not matched to an edge as in Resize; it produces
             a square int x int crop)
            If provided a sequence of length 1, it will be interpreted as
            (size[0], size[0]).
        Only int or str or tuple value is supported for PIL Image.
            (so PIL input is allowed here as well)
'''
# First, build a callable from the RandomCrop class template
trans_random = transforms.RandomCrop(512)   # expect 512x512 crops; works on PIL images
# Use Compose to chain them: first RandomCrop, then ToTensor
trans_compose_2 = transforms.Compose([trans_random, trans_totensor])
# Apply it repeatedly in a for loop to get several different crops
for i in range(10):
    img_compose = trans_compose_2(img)
    # visualize with TensorBoard, using i to control the step
    writer.add_image("compose", img_compose, i)
# close, close, close
writer.close()
# Run, then open the event file from the terminal.
# We can now see the ten cropped images at steps 0-9.
# Next, specify the crop size with a tuple:
tran_random_2 = transforms.RandomCrop((256, 256))
trans_compose_3 = transforms.Compose([tran_random_2, trans_totensor])
for i in range(10):
    img_compose_tuple = trans_compose_3(img)
    writer.add_image('compose_tuple(,)', img_compose_tuple, i)
# Run and open TensorBoard.
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is
            set to True.
            # where the dataset lives; usually a relative path like "./dataset2"
        train (bool, optional): If True, creates dataset from training set,
            otherwise creates from test set.
        transform (callable, optional): A function/transform that takes in an
            PIL image and returns a transformed version.
            E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that
            takes in the target and transforms it.
        download (bool, optional): If true, downloads the dataset from the
            internet and puts it in root directory. If dataset is already
            downloaded, it is not downloaded again.
    """
# First, let's learn how to use the dataset.
'''
The CIFAR dataset: CIFAR10 Dataset.

Parameters
    root (string) – Root directory of dataset where directory
        cifar-10-batches-py exists or will be saved to if download is set to
        True.  # where the dataset lives; usually a relative path like "./dataset"
    train (bool, optional) – If True, creates dataset from training set,
        otherwise creates from test set.
        # True gives the training set, False the validation (test) set
    transform (callable, optional) – A function/transform that takes in an
        PIL image and returns a transformed version. E.g, transforms.RandomCrop
    target_transform (callable, optional) – A function/transform that takes
        in the target and transforms it.
    download (bool, optional) – If true, downloads the dataset from the
        internet and puts it in root directory. If dataset is already
        downloaded, it is not downloaded again.
        # so it is generally best to leave this as True; if you downloaded
        # the archive elsewhere (it's just a compressed file) and dropped it
        # in, it will verify and unpack it for you
'''
# Now let's actually use this dataset.
import torchvision
from torch.utils.tensorboard import SummaryWriter
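The dataset-creation code itself is missing from this extract; a sketch of the usual pattern, assuming the "./dataset2" root mentioned in the docstring comment above:

dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
])

# download=True both fetches the archive and verifies/unpacks a local copy
train_set = torchvision.datasets.CIFAR10(root="./dataset2", train=True,
                                         transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset2", train=False,
                                        transform=dataset_transform, download=True)

# Peek at the first ten test images in TensorBoard
writer = SummaryWriter("logs")
for i in range(10):
    img, target = test_set[i]
    writer.add_image("test_set", img, i)
writer.close()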
class DataLoader(Generic[T_co]):
    r"""
    Data loader. Combines a dataset and a sampler, and provides an iterable
    over the given dataset.

    The :class:`~torch.utils.data.DataLoader` supports both map-style and
    iterable-style datasets with single- or multi-process loading,
    customizing loading order and optional automatic batching (collation)
    and memory pinning.

    See :py:mod:`torch.utils.data` documentation page for more details.

    Args:
        dataset (Dataset): dataset from which to load the data.
        batch_size (int, optional): how many samples per batch to load
            (default: ``1``).  # note the samples are drawn from the dataset
        shuffle (bool, optional): set to ``True`` to have the data reshuffled
            at every epoch (default: ``False``).  # think of it as shuffling a deck
        sampler (Sampler or Iterable, optional): defines the strategy to draw
            samples from the dataset. Can be any ``Iterable`` with
            ``__len__`` implemented. If specified, :attr:`shuffle` must not
            be specified.
        batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`,
            but returns a batch of indices at a time. Mutually exclusive with
            :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`, and
            :attr:`drop_last`.
        num_workers (int, optional): how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main
            process. (default: ``0``)  # multi-process vs. single-process loading
        collate_fn (Callable, optional): merges a list of samples to form a
            mini-batch of Tensor(s). Used when using batched loading from a
            map-style dataset.
        pin_memory (bool, optional): If ``True``, the data loader will copy
            Tensors into device/CUDA pinned memory before returning them. If
            your data elements are a custom type, or your :attr:`collate_fn`
            returns a batch that is a custom type, see the example below.
        drop_last (bool, optional): set to ``True`` to drop the last
            incomplete batch, if the dataset size is not divisible by the
            batch size. If ``False`` and the size of dataset is not divisible
            by the batch size, then the last batch will be smaller.
            (default: ``False``)
        timeout (numeric, optional): if positive, the timeout value for
            collecting a batch from workers. Should always be non-negative.
            (default: ``0``)
        worker_init_fn (Callable, optional): If not ``None``, this will be
            called on each worker subprocess with the worker id (an int in
            ``[0, num_workers - 1]``) as input, after seeding and before data
            loading. (default: ``None``)
        generator (torch.Generator, optional): If not ``None``, this RNG will
            be used by RandomSampler to generate random indexes and
            multiprocessing to generate `base_seed` for workers.
            (default: ``None``)
        prefetch_factor (int, optional, keyword-only arg): Number of batches
            loaded in advance by each worker. ``2`` means there will be a
            total of 2 * num_workers batches prefetched across all workers.
            (default: ``2``)
        persistent_workers (bool, optional): If ``True``, the data loader
            will not shutdown the worker processes after a dataset has been
            consumed once. This allows to maintain the workers `Dataset`
            instances alive. (default: ``False``)
        pin_memory_device (str, optional): the data loader will copy Tensors
            into device pinned memory before returning them if pin_memory is
            set to true.

    .. warning:: If the ``spawn`` start method is used,
                 :attr:`worker_init_fn` cannot be an unpicklable object,
                 e.g., a lambda function. See
                 :ref:`multiprocessing-best-practices` on more details
                 related to multiprocessing in PyTorch.

    .. warning:: ``len(dataloader)`` heuristic is based on the length of the
                 sampler used. When :attr:`dataset` is an
                 :class:`~torch.utils.data.IterableDataset`, it instead
                 returns an estimate based on ``len(dataset) / batch_size``,
                 with proper rounding depending on :attr:`drop_last`,
                 regardless of multi-process loading configurations. This
                 represents the best guess PyTorch can make because PyTorch
                 trusts user :attr:`dataset` code in correctly handling
                 multi-process loading to avoid duplicate data.

                 However, if sharding results in multiple workers having
                 incomplete last batches, this estimate can still be
                 inaccurate, because (1) an otherwise complete batch can be
                 broken into multiple ones and (2) more than one batch worth
                 of samples can be dropped when :attr:`drop_last` is set.
                 Unfortunately, PyTorch can not detect such cases in general.

                 See `Dataset Types`_ for more details on these two types of
                 datasets and how
                 :class:`~torch.utils.data.IterableDataset` interacts with
                 `Multi-process data loading`_.

    .. warning:: See :ref:`reproducibility`, and
                 :ref:`dataloader-workers-random-seed`, and
                 :ref:`data-loading-randomness` notes for random seed related
                 questions.
    """
num_workers (int, optional): how many subprocesses to use for data
    loading. ``0`` means that the data will be loaded in the main
    process. (default: ``0``)  # i.e. multi-process vs. single-process loading
Example
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
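The loader setup is not shown in this extract; a sketch consistent with the loop below (batch_size=64 and shuffle=True are assumptions):

test_data = torchvision.datasets.CIFAR10("./dataset2", train=False,
                                         transform=torchvision.transforms.ToTensor())
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True,
                         num_workers=0, drop_last=False)
writer = SummaryWriter("logs")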
for epoch in range(2):   # epoch takes the values 0 and 1: just two passes,
                         # with "Epoch:{}".format(epoch) used to tell them apart
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images("Epoch:{}".format(epoch), imgs, step)
        step += 1
writer.close()
# First, the relation between dataset and dataloader:
# a dataset collects and organizes the data;
# a dataloader loads and fetches from it, in a form that feeds directly into
# the training of a neural network.
'''
torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
    sampler=None, batch_sampler=None, num_workers=0, collate_fn=None,
    pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None,
    multiprocessing_context=None, generator=None, *, prefetch_factor=2,
    persistent_workers=False)[source]

Clearly most parameters have defaults, so for now we only cover the
commonly used ones.

Data loader. Combines a dataset and a sampler, and provides an iterable over
the given dataset.

The DataLoader supports both map-style and iterable-style datasets with
single- or multi-process loading, customizing loading order and optional
automatic batching (collation) and memory pinning.

See torch.utils.data documentation page for more details.
'''
# Now let's use DataLoader. First, prepare the test dataset.
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# Let's use the epoch trick to play with shuffle; first set shuffle to False.
for epoch in range(2):   # epoch takes the values 0 and 1: just two passes,
                         # with "Epoch:{}".format(epoch) used to tell them apart
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images("Epoch:{}".format(epoch), imgs, step)
        step += 1
writer.close()
'''
def format(self, *args, **kwargs):  # known special case of str.format
    """
    S.format(*args, **kwargs) -> str

    Return a formatted version of S, using substitutions from args and kwargs.
    The substitutions are identified by braces ('{' and '}').
    """
    # In the example above, 0 or 1 gets substituted into the braces.
    pass
'''
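A quick illustration of what gets substituted into the braces:

print("Epoch:{}".format(0))   # -> Epoch:0
print("Epoch:{}".format(1))   # -> Epoch:1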
class Module:
    r"""Base class for all neural network modules.

    Your models should also subclass this class.

    Modules can also contain other Modules, allowing to nest them in a tree
    structure. You can assign the submodules as regular attributes::

        import torch.nn as nn
        import torch.nn.functional as F

        class Model(nn.Module):
            def __init__(self):
                super().__init__()
                self.conv1 = nn.Conv2d(1, 20, 5)
                self.conv2 = nn.Conv2d(20, 20, 5)

            def forward(self, x):
                x = F.relu(self.conv1(x))
                return F.relu(self.conv2(x))

    Submodules assigned in this way will be registered, and will have their
    parameters converted too when you call :meth:`to`, etc.

    .. note::
        As per the example above, an ``__init__()`` call to the parent class
        must be made before assignment on the child.

    :ivar training: Boolean represents whether this module is in training or
                    evaluation mode.
    :vartype training: bool
    """
# _*_coding:utf-8_*_
# In this section we learn how to build a neural network.
# The relevant docs live under torch.nn > Containers > Module.
'''
Class torch.nn.Module
    Base class for all neural network modules.  (the base module that every
        neural network builds on)
    Your models should also subclass this class.  (inherit from it)
    Modules can also contain other Modules, allowing to nest them in a tree
    structure. You can assign the submodules as regular attributes.

An example:

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):            # Model is the model's name; (nn.Module) inherits from the class template
        def __init__(self):            # the initialization part
            super(Model, self).__init__()  # required: call the parent initializer to set up the nn.Module machinery
            # what follows are custom values of our own, to be fed into the network
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):          # "forward" means the forward pass of the network
            # here the network is actually applied:
            x = F.relu(self.conv1(x))    # first convolution, then a ReLU non-linearity
            return F.relu(self.conv2(x)) # second convolution, ReLU, then return the value

This is a simple example of a custom neural network Model built from the
nn.Module class template.
'''
# Now, following the example above, let's design our first neural network to
# get familiar with nn.Module. First, subclass the class template:
import torch
from torch import nn
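The class definition itself is cut off in this extract; a minimal sketch of a first network in the spirit of the lesson (the name xiaoxiao follows the later sections; the add-one forward is an assumption chosen to keep the example trivial):

class xiaoxiao(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        output = input + 1
        return output

xiao = xiaoxiao()
x = torch.tensor(1.0)
print(xiao(x))   # tensor(2.) -- forward runs when the instance is called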
In some circumstances when given tensors on a CUDA device and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is undesirable, you can try to make the operation deterministic (potentially at a performance cost) by setting torch.backends.cudnn.deterministic = True. See Reproducibility for more information.
NOTE:
This operator supports complex data types i.e. complex32, complex64, complex128.
# In this section we study the convolution operation.
# First, the difference between torch.nn and torch.nn.functional:
#   torch.nn is the assembled interface -- think of it as the steering wheel;
#   torch.nn.functional is the unassembled parts -- the individual gears.
# Here we cover torch.nn > Convolution Layers.
#   nn.Conv1d is one-dimensional;
#   nn.Conv2d is two-dimensional -- the one we mainly use.
'''
Conv2d
class torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1,
    padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
    device=None, dtype=None)

Applies a 2D convolution over an input signal composed of several input planes.

stride   -- how far the kernel moves each step;
    controls the stride for the cross-correlation, a single number or a tuple.
padding  -- controls the amount of padding applied to the input. It can be
    either a string {'valid', 'same'} or a tuple of ints giving the amount of
    implicit padding applied on both sides. The padding value is usually 0.
dilation -- controls the spacing between the kernel points; also known as the
    à trous algorithm. It is harder to describe, but this link has a nice
    visualization of what dilation does.
groups   -- controls the connections between inputs and outputs. in_channels
    and out_channels must both be divisible by groups. For example,
    at groups=1, all inputs are convolved to all outputs;
    at groups=2, the operation becomes equivalent to having two conv layers
    side by side, each seeing half the input channels and producing half the
    output channels, and both subsequently concatenated.

Parameters
    in_channels (int) – Number of channels in the input image
    out_channels (int) – Number of channels produced by the convolution
    kernel_size (int or tuple) – Size of the convolving kernel
    stride (int or tuple, optional) – Stride of the convolution. Default: 1
    padding (int, tuple or str, optional) – Padding added to all four sides
        of the input. Default: 0
    padding_mode (string, optional) – 'zeros', 'reflect', 'replicate' or
        'circular'. Default: 'zeros'
    dilation (int or tuple, optional) – Spacing between kernel elements. Default: 1
    groups (int, optional) – Number of blocked connections from input
        channels to output channels. Default: 1
    bias (bool, optional) – If True, adds a learnable bias to the output. Default: True
'''
# Now let's actually try the convolution operation.
# First, the tensor we want to convolve:
import torch
import torch.nn.functional as F
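The tensors themselves were lost in extraction; a sketch matching the standard 5x5-input, 3x3-kernel walkthrough (the values are illustrative):

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

# F.conv2d expects (minibatch, in_channels, H, W)
input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))

output = F.conv2d(input, kernel, stride=1)
print(output)    # 3x3 result
output2 = F.conv2d(input, kernel, stride=1, padding=1)
print(output2)   # 5x5 result: the input is zero-padded by one ring first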
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
'''
MaxPool2d
class torch.nn.MaxPool2d(kernel_size, stride=None, padding=0, dilation=1,
    return_indices=False, ceil_mode=False)

Parameters
    kernel_size – the size of the window to take a max over
    stride – the stride of the window. Default value is kernel_size
        (note: the default stride equals the kernel size, unlike Conv2d)
    padding – implicit zero padding to be added on both sides
    dilation – a parameter that controls the stride of elements in the window
        ("checkerboard" pooling, skipping every other value; rarely used)
    return_indices – if True, will return the max indices along with the
        outputs. Useful for torch.nn.MaxUnpool2d later
    ceil_mode – when True, will use ceil instead of floor to compute the
        output shape.
        Ceil and floor are opposites: in rounding, ceil ("ceiling") rounds up
        and floor rounds down. Here, ceil mode means a window that does not
        fully fit inside the input still produces a value, while floor mode
        simply discards such partial windows.
        A concrete example follows below.
'''
# Now let's try the pooling operation concretely.
import torch
from torch import nn
from torch.nn import MaxPool2d
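A sketch of the ceil_mode comparison described above, on the usual 5x5 example (values are illustrative; MaxPool2d needs floating-point input):

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
input = torch.reshape(input, (-1, 1, 5, 5))

class xiaoxiao(nn.Module):
    def __init__(self):
        super().__init__()
        self.maxpool_ceil = MaxPool2d(kernel_size=3, ceil_mode=True)
        self.maxpool_floor = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, x):
        return self.maxpool_ceil(x), self.maxpool_floor(x)

xiao = xiaoxiao()
out_ceil, out_floor = xiao(input)
print(out_ceil)    # 2x2 output: partial windows are kept
print(out_floor)   # 1x1 output: partial windows are discarded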
# _*_coding:utf-8_*_
# In this section we study Pooling Layers, applying the pooling operation to
# a dataset.
import torchvision   # for the dataset
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
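The dataset loop itself is missing; a sketch of applying a pooling network to CIFAR-10 and logging input and output side by side (batch_size=64 and the "./dataset2" root are assumptions):

dataset = torchvision.datasets.CIFAR10("./dataset2", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

class xiaoxiao(nn.Module):
    def __init__(self):
        super().__init__()
        self.maxpool = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, x):
        return self.maxpool(x)

xiao = xiaoxiao()
writer = SummaryWriter("logs")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    writer.add_images("maxpool_output", xiao(imgs), step)
    step += 1
writer.close()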
'''
class torch.nn.ReLU(inplace=False)[source]

Applies the rectified linear unit function element-wise.

Parameters
    inplace – can optionally do the operation in-place. Default: False
        (replace the input directly; usually left as False)

    inplace=True:  input = -1  ->  after ReLU, input = 0   (input overwritten)
    inplace=False: input = -1  ->  input stays -1, output = 0

Shape:
    Input: (*), where * means any number of dimensions.
        (very flexible; no real constraints)
    Output: (*), same shape as the input.
'''
# Now let's apply the non-linear activation function concretely.
import torch
from torch import nn
from torch.nn import ReLU

# the input data:
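A sketch of the ReLU demo this section builds toward (the 2x2 values are the common example; illustrative):

input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))

class xiaoxiao(nn.Module):
    def __init__(self):
        super().__init__()
        self.relu = ReLU(inplace=False)   # keep the input tensor unchanged

    def forward(self, x):
        return self.relu(x)

xiao = xiaoxiao()
print(xiao(input))   # negatives clamped to 0: [[1, 0], [0, 3]]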
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
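The loop below needs a dataset, a loader, and a Linear model named xiao; a sketch with the dimensions implied by CIFAR-10 batches of 64 (196608 = 64 * 3 * 32 * 32; drop_last=True is an assumption that keeps the flattened length fixed):

dataset = torchvision.datasets.CIFAR10("./dataset2", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)

class xiaoxiao(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = Linear(196608, 10)   # 196608 = 64 * 3 * 32 * 32

    def forward(self, x):
        return self.linear1(x)

xiao = xiaoxiao()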
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)       # e.g. torch.Size([64, 3, 32, 32])
    output = torch.flatten(imgs)
    print(output.shape)     # flattened to one dimension
    output = xiao(output)
    print(output.shape)
# _*_coding:utf-8_*_
# In this section we study the Linear layer, with a brief look at the other
# layer families.
# Normalization Layers: normalization, also called standardization layers.
'''
BatchNorm2d
class torch.nn.BatchNorm2d(num_features, eps=1e-05, momentum=0.1,
    affine=True, track_running_stats=True, device=None, dtype=None)[source]

Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with
additional channel dimension) as described in the paper
Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift.

Parameters
    num_features – C from an expected input of size (N, C, H, W)   # note the input format
    eps – a value added to the denominator for numerical stability. Default: 1e-5
    momentum – the value used for the running_mean and running_var
        computation. Can be set to None for cumulative moving average
        (i.e. simple average). Default: 0.1
    affine – a boolean value that when set to True, this module has learnable
        affine parameters. Default: True
    track_running_stats – a boolean value that when set to True, this module
        tracks the running mean and variance, and when set to False, this
        module does not track such statistics, and initializes statistics
        buffers running_mean and running_var as None. When these buffers are
        None, this module always uses batch statistics in both training and
        eval modes. Default: True

Examples:
    # >>> # With Learnable Parameters
    # >>> m = nn.BatchNorm2d(100)
    # >>> # Without Learnable Parameters
    # >>> m = nn.BatchNorm2d(100, affine=False)
    # >>> input = torch.randn(20, 100, 35, 45)
    # >>> output = m(input)
    # This simply standardizes the input.
'''
'''
Recurrent Layers: mostly used for text recognition.
Transformer Layers: mostly used for NLP.
Dropout Layers: mainly to prevent overfitting; randomly zeroes elements of
    the input tensor.
Sparse Layers: mainly used for sparse features in natural language processing.
'''
'''
Let's focus on the Linear layer.

Linear
class torch.nn.Linear(in_features, out_features, bias=True, device=None,
    dtype=None)[source]

Applies a linear transformation to the incoming data: y = xA^T + b
    (i.e. y = w * x + b, much like a perceptron)

This module supports TensorFloat32.

Parameters
    in_features – size of each input sample    (number of input elements)
    out_features – size of each output sample  (number of output elements)
    bias – If set to False, the layer will not learn an additive bias.
        Default: True   (whether to add a bias term)

In effect, it maps the data from one length to another.
'''
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
class xiaoxiao(nn.Module):
    def __init__(self):
        super(xiaoxiao, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear_1 = Linear(in_features=1024, out_features=64)
        self.linear_2 = Linear(in_features=64, out_features=10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.linear_1(x)
        x = self.linear_2(x)
        return x
# _*_coding:utf-8_*_
# In this section we study the Sequential container and use it to build a
# simple network.
'''
Sequential: in short, it chains the layers of a network together in order,
making the model easier to express.

Example:

    # Using Sequential to create a small model. When `model` is run,
    # input will first be passed to `Conv2d(1,20,5)`. The output of
    # `Conv2d(1,20,5)` will be used as the input to the first
    # `ReLU`; the output of the first `ReLU` will become the input
    # for `Conv2d(20,64,5)`. Finally, the output of
    # `Conv2d(20,64,5)` will be used as input to the second `ReLU`
    model = nn.Sequential(
        nn.Conv2d(1, 20, 5),
        nn.ReLU(),
        nn.Conv2d(20, 64, 5),
        nn.ReLU()
    )

    # Using Sequential with OrderedDict. This is functionally the
    # same as the above code
    model = nn.Sequential(OrderedDict([
        ('conv1', nn.Conv2d(1, 20, 5)),
        ('relu1', nn.ReLU()),
        ('conv2', nn.Conv2d(20, 64, 5)),
        ('relu2', nn.ReLU())
    ]))
'''
# First, build the network the plain way (without Sequential):
import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.tensorboard import SummaryWriter
class xiaoxiao(nn.Module):
    def __init__(self):
        super(xiaoxiao, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2, stride=1)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear_1 = Linear(in_features=1024, out_features=64)
        self.linear_2 = Linear(in_features=64, out_features=10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.linear_1(x)
        x = self.linear_2(x)
        return x

# Instantiate the network
xiao = xiaoxiao()
# print it to inspect the structure
print(xiao)

# Sanity-check the network with one of PyTorch's built-in tensor constructors:
input = torch.ones((64, 3, 32, 32))
output = xiao(input)
print(output.shape)
# This raised: RuntimeError: mat1 and mat2 shapes cannot be multiplied (64x4096 and 1024x64)
# -- so something is wrong in the linear layer.
# Remove the linear layers from forward and rerun to find the actual input
# size of the first Linear:
# torch.Size([64, 4096]) -- so it's 4096. Why not 1024?
# Let's just use the value it computed, 4096: after flattening we have
# 64 samples of length 4096 each.
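For comparison, a sketch of the same architecture rewritten with Sequential (keeping Linear(1024, 64) as in the class above, which is what padding=2 over 32x32 inputs works out to; if your own run reports 4096, substitute that measured value):

class xiaoxiao_seq(nn.Module):
    def __init__(self):
        super().__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        return self.model1(x)

xiao_seq = xiaoxiao_seq()
input = torch.ones((64, 3, 32, 32))
print(xiao_seq(input).shape)   # torch.Size([64, 10])

# Sequential also makes the computation graph easy to visualize:
writer = SummaryWriter("logs_seq")
writer.add_graph(xiao_seq, input)
writer.close()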
# _*_coding:utf-8_*_
# In this section we study loss functions and backpropagation.
# A loss function measures the gap between the output and the target,
# and gives us a basis for updating the weights (via backpropagation).
'''
L1Loss
class torch.nn.L1Loss(size_average=None, reduce=None, reduction='mean')[source]
    (everything has a default, so we can use it as-is for now)

Creates a criterion that measures the mean absolute error (MAE) between each
element in the input x and target y.

Shape:
    Input: (*), where * means any number of dimensions.
    Target: (*), same shape as the input.
    Output: scalar. If reduction is 'none', then (*), same shape as the input.

Examples:
    # >>> loss = nn.L1Loss()
    # >>> input = torch.randn(3, 5, requires_grad=True)
    # >>> target = torch.randn(3, 5)
    # >>> output = loss(input, target)
    # >>> output.backward()
'''
# In practice:
import torch
import torchvision
from torch import nn
from torch.nn import L1Loss, MSELoss, Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader
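The input tensors were lost in extraction; reconstructing plausible ones from the printed results below (mean 0.6667 and sum 2.0 are consistent with these values; the float32 dtype anticipates the error noted after):

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)

inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

loss_mean = L1Loss()                 # default reduction="mean"
print(loss_mean(inputs, targets))    # tensor(0.6667) = (0 + 0 + 2) / 3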
loss = L1Loss(reduction="sum")   # the reduction parameter selects how to
                                 # aggregate: "sum" or "mean" (default: "mean")
result = loss(inputs, targets)
print(result)
# Note: with integer tensors this raised:
#   RuntimeError: Can only calculate the mean of floating types. Got Long instead.
# hence the dtype=torch.float32 conversion above.
# Results:
#   tensor(0.6667)   # with reduction="mean"
#   tensor(2.)       # with reduction="sum"
'''
MSELoss
class torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')[source]

Creates a criterion that measures the mean squared error (squared L2 norm)
between each element in the input x and target y.

Parameters
    size_average (bool, optional) – Deprecated (see reduction). By default,
        the losses are averaged over each loss element in the batch. Note
        that for some losses, there are multiple elements per sample. If the
        field size_average is set to False, the losses are instead summed for
        each minibatch. Ignored when reduce is False. Default: True
    reduce (bool, optional) – Deprecated (see reduction). By default, the
        losses are averaged or summed over observations for each minibatch
        depending on size_average. When reduce is False, returns a loss per
        batch element instead and ignores size_average. Default: True
    reduction (string, optional) – Specifies the reduction to apply to the
        output: 'none' | 'mean' | 'sum'. 'none': no reduction will be
        applied, 'mean': the sum of the output will be divided by the number
        of elements in the output, 'sum': the output will be summed. Note:
        size_average and reduce are in the process of being deprecated, and
        in the meantime, specifying either of those two args will override
        reduction. Default: 'mean'
        (again, everything has a default, so we use it as-is for now)

Shape:
    Input: (*), where * means any number of dimensions.
    Target: (*), same shape as the input.

Examples:
    >>> loss = nn.MSELoss()
    >>> input = torch.randn(3, 5, requires_grad=True)
    >>> target = torch.randn(3, 5)
    >>> output = loss(input, target)
    >>> output.backward()
'''
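A sketch of the corresponding MSELoss call on the same tensors; with the values above the result would be (0 + 0 + 2^2) / 3:

loss_mse = MSELoss()
result_mse = loss_mse(inputs, targets)
print(result_mse)   # tensor(1.3333)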
'''
CrossEntropyLoss
class torch.nn.CrossEntropyLoss(weight=None, size_average=None,
    ignore_index=-100, reduce=None, reduction='mean',
    label_smoothing=0.0)[source]

This criterion computes the cross entropy loss between input and target.

Parameters
    weight (Tensor, optional) – a manual rescaling weight given to each
        class. If given, has to be a Tensor of size C
    size_average (bool, optional) – Deprecated (see reduction). By default,
        the losses are averaged over each loss element in the batch. Note
        that for some losses, there are multiple elements per sample. If the
        field size_average is set to False, the losses are instead summed for
        each minibatch. Ignored when reduce is False. Default: True
    ignore_index (int, optional) – Specifies a target value that is ignored
        and does not contribute to the input gradient. When size_average is
        True, the loss is averaged over non-ignored targets. Note that
        ignore_index is only applicable when the target contains class
        indices.
    reduce (bool, optional) – Deprecated (see reduction). By default, the
        losses are averaged or summed over observations for each minibatch
        depending on size_average. When reduce is False, returns a loss per
        batch element instead and ignores size_average. Default: True
    reduction (string, optional) – Specifies the reduction to apply to the
        output: 'none' | 'mean' | 'sum'. 'none': no reduction will be
        applied, 'mean': the weighted mean of the output is taken, 'sum':
        the output will be summed. Note: size_average and reduce are in the
        process of being deprecated, and in the meantime, specifying either
        of those two args will override reduction. Default: 'mean'
    label_smoothing (float, optional) – A float in [0.0, 1.0]. Specifies the
        amount of smoothing when computing the loss, where 0.0 means no
        smoothing. The targets become a mixture of the original ground truth
        and a uniform distribution as described in Rethinking the Inception
        Architecture for Computer Vision. Default: 0.0
        (again, everything has a default, so we use it as-is for now)

Shape:
    Input: (N, C) where C = number of classes, or (N, C, d_1, d_2, ..., d_K)
        with K >= 1 in the case of K-dimensional loss.
        (note: N is the batch_size and C the number of classes)
    Target: If containing class indices, shape (N) where each value satisfies
        0 <= targets[i] <= C-1, or (N, d_1, d_2, ..., d_K) with K >= 1 in the
        case of K-dimensional loss. If containing class probabilities, same
        shape as the input.
    Output: If reduction is 'none', shape (N) or (N, d_1, d_2, ..., d_K) with
        K >= 1 in the case of K-dimensional loss. Otherwise, scalar.
'''
x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)
# Result:
#   tensor(1.1019)
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch import optim
from torch.utils.data import DataLoader
# CV
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch import optim
from torch.utils.data import DataLoader
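The optimizer loop this section builds toward is missing from the extract; a sketch of the standard pattern using the network from the Sequential section (lr=0.01 and 20 epochs are assumptions):

dataset = torchvision.datasets.CIFAR10("./dataset2", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

class xiaoxiao(nn.Module):
    def __init__(self):
        super().__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2), MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2), MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2), MaxPool2d(2),
            Flatten(), Linear(1024, 64), Linear(64, 10),
        )

    def forward(self, x):
        return self.model1(x)

xiao = xiaoxiao()
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(xiao.parameters(), lr=0.01)

for epoch in range(20):
    running_loss = 0.0
    for imgs, targets in dataloader:
        outputs = xiao(imgs)
        loss = loss_fn(outputs, targets)
        optimizer.zero_grad()   # clear gradients left over from the last step
        loss.backward()         # backpropagate to compute fresh gradients
        optimizer.step()        # update the parameters
        running_loss += loss.item()
    print(running_loss)         # the per-epoch total loss should trend down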
# Set up some bookkeeping for the model:
# count of training steps
total_train_step = 0
# count of evaluation (test) rounds
total_test_step = 0
# number of epochs
epoch = 10

for i in range(epoch):
    print("-----Epoch {} begins-----".format(i + 1))   # +1 to match reading habits
    # Training steps
    for data in train_load:
        imgs, targets = data
        outputs = xiao(imgs)
        loss = loss_fun(outputs, targets)
        # Optimization steps
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # After each epoch, evaluate on the test set and compute the loss to see
    # how training is going.
    total_test_loss = 0
    with torch.no_grad():   # evaluation only: make sure no gradients are touched
        for data in test_load:   # draw from the test set
            imgs, targets = data
            outputs = xiao(imgs)
            loss = loss_fun(outputs, targets)
            total_test_loss += loss
    print("Total loss on the test set: {}".format(total_test_loss))
# Set up some bookkeeping for the model:
# count of training steps
total_train_step = 0
# count of evaluation (test) rounds
total_test_step = 0
# number of epochs
epoch = 10

for i in range(epoch):
    print("-----Epoch {} begins-----".format(i + 1))   # +1 to match reading habits
    # Training steps
    xiao.train()
    for data in train_load:
        imgs, targets = data
        outputs = xiao(imgs)
        loss = loss_fun(outputs, targets)
        # Optimization steps
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step += 1
        if total_train_step % 100 == 0:   # refinement 1: only log every 100 steps
            print("Training step: {}  loss: {}".format(total_train_step, loss.item()))
            # .item() converts a 0-dim tensor into a plain int/float
            writer.add_scalar("train_loss", loss.item(), global_step=total_train_step)

    # After each epoch, evaluate on the test set and compute the loss to see
    # how training is going.
    xiao.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():   # evaluation only: make sure no gradients are touched
        for data in test_load:   # draw from the test set
            imgs, targets = data
            outputs = xiao(imgs)
            loss = loss_fun(outputs, targets)
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()   # 1 where equal, 0 where not
            total_accuracy += accuracy
    print("Total loss on the test set: {}".format(total_test_loss))
    print("Overall test accuracy: {}".format(total_accuracy / test_data_len))
    # accuracy = number of correct predictions / total number of samples
    writer.add_scalar("total_test_loss", scalar_value=total_test_loss, global_step=total_test_step)
    writer.add_scalar("total_accuracy", scalar_value=total_accuracy, global_step=total_test_step)
    total_test_step += 1   # one evaluation per epoch, so this matches the epoch count
# Set up some bookkeeping for the model:
# count of training steps
total_train_step = 0
# count of evaluation (test) rounds
total_test_step = 0
# number of epochs
epoch = 10

for i in range(epoch):
    print("-----Epoch {} begins-----".format(i + 1))   # +1 to match reading habits
    # Training steps
    xiao.train()
    for data in train_load:
        imgs, targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = xiao(imgs)
        loss = loss_fun(outputs, targets)
        # Optimization steps
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step += 1
        if total_train_step % 100 == 0:   # refinement 1: only log every 100 steps
            print("Training step: {}  loss: {}".format(total_train_step, loss.item()))
            # .item() converts a 0-dim tensor into a plain int/float
            writer.add_scalar("train_loss", loss.item(), global_step=total_train_step)

    # After each epoch, evaluate on the test set and compute the loss to see
    # how training is going.
    xiao.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():   # evaluation only: make sure no gradients are touched
        for data in test_load:   # draw from the test set
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = xiao(imgs)
            loss = loss_fun(outputs, targets)
            total_test_loss += loss.item()
            accuracy = (outputs.argmax(1) == targets).sum()   # 1 where equal, 0 where not
            total_accuracy += accuracy
    print("Total loss on the test set: {}".format(total_test_loss))
    print("Overall test accuracy: {}".format(total_accuracy / test_data_len))
    # accuracy = number of correct predictions / total number of samples
    writer.add_scalar("total_test_loss", scalar_value=total_test_loss, global_step=total_test_step)
    writer.add_scalar("total_accuracy", scalar_value=total_accuracy, global_step=total_test_step)
    total_test_step += 1   # one evaluation per epoch, so this matches the epoch count