第一段代码 model_CT.py 用于构造生成器 G 和判别器 D

# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 20:33:14 2018
E-mail: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""

import tensorflow as tf
import numpy as np

# convolution / pooling helpers
def conv2d(x, W):
    """Stride-1 2-D convolution of `x` with kernel `W`, SAME padding."""
    return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME')

def avg_pool_2x2(x):
    """2x2 average pooling with stride 2, SAME padding (halves height and width)."""
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def xavier_init(size):
    """Xavier-style weight init: N(0, stddev) with stddev = 1/sqrt(fan_in/2),
    where fan_in = size[0]."""
    fan_in = size[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    return tf.random_normal(shape=size, stddev=stddev)
def sample_z(shape):
    """Draw latent noise uniformly from [-1, 1) with the requested shape."""
    return np.random.uniform(low=-1., high=1., size=shape)

# discriminator
def discriminator(x_image, reuse=False):
    """Convolutional discriminator.

    Four conv (5x5) + 2x2 average-pool stages taking the channels through
    1 -> 8 -> 16 -> 32 -> 64, followed by three fully connected layers
    (flatten -> 320 -> 80 -> 1). Returns raw logits of shape [batch, 1];
    no sigmoid is applied here (callers use *_with_logits losses).
    """
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        # Stage 1: 1 -> 8 channels.
        w1 = tf.get_variable('d_wconv1', shape=[5, 5, 1, 8],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
        b1 = tf.get_variable('d_bconv1', shape=[8],
                             initializer=tf.constant_initializer(0))
        pool1 = avg_pool_2x2(tf.nn.relu(conv2d(x_image, w1) + b1))

        # Stage 2: 8 -> 16 channels.
        w2 = tf.get_variable('d_wconv2', shape=[5, 5, 8, 16],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
        b2 = tf.get_variable('d_bconv2', shape=[16],
                             initializer=tf.constant_initializer(0))
        pool2 = avg_pool_2x2(tf.nn.relu(conv2d(pool1, w2) + b2))

        # Stage 3: 16 -> 32 channels.
        w3 = tf.get_variable('d_wconv3', shape=[5, 5, 16, 32],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
        b3 = tf.get_variable('d_bconv3', shape=[32],
                             initializer=tf.constant_initializer(0))
        pool3 = avg_pool_2x2(tf.nn.relu(conv2d(pool2, w3) + b3))

        # Stage 4: 32 -> 64 channels.
        w4 = tf.get_variable('d_wconv4', shape=[5, 5, 32, 64],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
        b4 = tf.get_variable('d_bconv4', shape=[64],
                             initializer=tf.constant_initializer(0))
        pool4 = avg_pool_2x2(tf.nn.relu(conv2d(pool3, w4) + b4))

        # FC 1: flatten then project to 320 units.
        # The flat size assumes a 221x181 input pooled four times down to
        # 14x12x64 -- TODO confirm against the data pipeline's image size.
        wf1 = tf.get_variable('d_wfc1', [14 * 12 * 64, 320],
                              initializer=tf.truncated_normal_initializer(stddev=0.02))
        bf1 = tf.get_variable('d_bfc1', [320], initializer=tf.constant_initializer(0))
        flat = tf.reshape(pool4, [-1, 14 * 12 * 64])
        fc1 = tf.nn.relu(tf.matmul(flat, wf1) + bf1)

        # FC 2: 320 -> 80.
        wf2 = tf.get_variable('d_wfc2', [320, 80],
                              initializer=tf.truncated_normal_initializer(stddev=0.02))
        bf2 = tf.get_variable('d_bfc2', [80], initializer=tf.constant_initializer(0))
        fc2 = tf.nn.relu(tf.matmul(fc1, wf2) + bf2)

        # Output layer: 80 -> 1, linear (raw logit).
        wf3 = tf.get_variable('d_wfc3', [80, 1],
                              initializer=tf.truncated_normal_initializer(stddev=0.02))
        bf3 = tf.get_variable('d_bfc3', [1], initializer=tf.constant_initializer(0))
        logits = tf.matmul(fc2, wf3) + bf3
    return logits

def _g_deconv_layer(inputs, output_shape, idx):
    """One generator up-sampling stage: 5x5 stride-2 transposed convolution
    plus bias, then batch norm and ReLU.

    Creates variables 'g_wconv{idx}' / 'g_bconv{idx}' and batch-norm scope
    'g_bn{idx}', matching the original per-layer naming.
    """
    W = tf.get_variable('g_wconv%d' % idx,
                        [5, 5, output_shape[-1], int(inputs.get_shape()[-1])],
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
    # NOTE(review): the original layer 2 passed truncated_normal_initializer(0.1)
    # for its bias (0.1 is the *mean* there) while every other layer used
    # constant_initializer(.1); unified on the constant initializer here.
    b = tf.get_variable('g_bconv%d' % idx, [output_shape[-1]],
                        initializer=tf.constant_initializer(.1))
    h = tf.nn.conv2d_transpose(inputs, W, output_shape=output_shape,
                               strides=[1, 2, 2, 1], padding='SAME')
    h = tf.add(h, b)
    h = tf.contrib.layers.batch_norm(inputs=h, center=True, scale=True,
                                     is_training=True, scope='g_bn%d' % idx)
    return tf.nn.relu(h)


# generator from DCGAN: takes a d-dimensional vector as input and upsamples it
# to a 221x181 single-channel image
# the structure is from https://arxiv.org/pdf/1511.06434v2.pdf
def generator(z, batch_size, z_dim, reuse=False):
    """DCGAN-style generator.

    Args:
        z: latent noise, reshapeable to [batch_size, 4, 3, 64]
           (i.e. its per-sample size must equal 4*3*64 = 768).
        batch_size: static batch size baked into every deconv output shape.
        z_dim: latent dimension (kept for interface compatibility; the
            reshape above fixes the expected size).
        reuse: when True, reuse existing 'generator' scope variables.

    Returns:
        tanh-activated image tensor of shape [batch_size, 221, 181, 1],
        values in (-1, 1).
    """
    with tf.variable_scope('generator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        g_dim = 64            # channels entering the first deconv stage
        c_dim = 1             # output image channels (grayscale)
        s_w, s_h = 221, 181   # output image size

        # Halved sizes per up-sampling stage (floor division).
        s_w2, s_w4, s_w8, s_w16, s_w32, s_w64 = (
            s_w // 2, s_w // 4, s_w // 8, s_w // 16, s_w // 32, s_w // 64)
        s_h2, s_h4, s_h8, s_h16, s_h32, s_h64 = (
            s_h // 2, s_h // 4, s_h // 8, s_h // 16, s_h // 32, s_h // 64)

        # h0: [batch_size, 4, 3, 64] (s_w64 = 3, s_h64 = 2).
        h = tf.reshape(z, [batch_size, s_w64 + 1, s_h64 + 1, g_dim])
        h = tf.nn.relu(h)

        # Five stride-2 deconv stages:
        # 4x3 -> 7x6 -> 14x12 -> 28x23 -> 56x46 -> 111x91,
        # channels 64 -> 256 -> 128 -> 64 -> 32 -> 16.
        stage_shapes = [
            [batch_size, s_w32 + 1, s_h32 + 1, c_dim * 256],
            [batch_size, s_w16 + 1, s_h16 + 1, c_dim * 128],
            [batch_size, s_w8 + 1, s_h8 + 1, c_dim * 64],
            [batch_size, s_w4 + 1, s_h4 + 1, c_dim * 32],
            [batch_size, s_w2 + 1, s_h2 + 1, c_dim * 16],
        ]
        for idx, out_shape in enumerate(stage_shapes, start=1):
            h = _g_deconv_layer(h, out_shape, idx)

        # Final stage: deconv straight to [batch_size, 221, 181, 1]; tanh
        # instead of batch norm + ReLU so the output lies in (-1, 1).
        output6_shape = [batch_size, s_w, s_h, c_dim]
        W_conv6 = tf.get_variable('g_wconv6',
                                  [5, 5, output6_shape[-1], int(h.get_shape()[-1])],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        b_conv6 = tf.get_variable('g_bconv6', [output6_shape[-1]],
                                  initializer=tf.constant_initializer(.1))
        H_conv6 = tf.nn.conv2d_transpose(h, W_conv6, output_shape=output6_shape,
                                         strides=[1, 2, 2, 1], padding='SAME')
        H_conv6 = tf.add(H_conv6, b_conv6)
        return tf.nn.tanh(H_conv6)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
第二段代码 data_generate_CT.py 设计输入 pipeline,用于读取 batch 数据:
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 19 15:40:11 2018
E-mail: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""

import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt

def get_files(file_dir):
    """List every file under *file_dir* and pair it with label 1.

    All samples share label 1 because only real images are listed here
    (the GAN's "real" class). The two lists are shuffled together so the
    i-th path still matches the i-th label.

    Args:
        file_dir: directory containing the training images.

    Returns:
        (image_list, label_list): shuffled list of file paths and a
        matching list of int labels (all 1).
    """
    # os.path.join handles file_dir with or without a trailing separator
    # (the original `file_dir + file` required the trailing one).
    paths = [os.path.join(file_dir, name) for name in os.listdir(file_dir)]
    labels = [1] * len(paths)

    # Shuffle paths and labels with one shared permutation. This avoids the
    # original's string round-trip through a numpy array (labels were cast
    # to str and back to int) and its dead np.hstack results.
    order = np.random.permutation(len(paths))
    image_list = [paths[i] for i in order]
    label_list = [labels[i] for i in order]
    return image_list, label_list
#       
# Feed the lists from get_files() into an input queue
# (tf.train.slice_input_producer, since images and labels are separate),
# then read, decode and batch the images.
def get_batch(image, label, batch_size):
    """Build a shuffled-batch input pipeline from path/label lists.

    Args:
        image: list of PNG file paths.
        label: list of int labels aligned with `image`.
        batch_size: number of samples per batch.

    Returns:
        (image_batch, label_batch): float32 tensor of shape
        [batch_size, 221, 181, 1] and an int32 label batch.
    """
    image_W, image_H = 221, 181

    # Convert the python lists into tensors TF can consume.
    image_t = tf.cast(image, tf.string)
    label_t = tf.cast(label, tf.int32)

    # Input queue with a bounded number of epochs so the pipeline
    # eventually raises OutOfRangeError instead of looping forever.
    epoch_num = 50
    input_queue = tf.train.slice_input_producer([image_t, label_t],
                                                num_epochs=epoch_num)

    out_label = input_queue[1]
    file_contents = tf.read_file(input_queue[0])
    # All images must share one format; this pipeline decodes single-channel PNG.
    decoded = tf.image.decode_png(file_contents, channels=1)

    # Fix the spatial size by crop/pad, then normalize each image
    # (zero mean, unit variance) for a more robust model.
    resized = tf.image.resize_image_with_crop_or_pad(decoded, image_W, image_H)
    standardized = tf.image.per_image_standardization(resized)

    # Assemble shuffled batches.
    min_after_dequeue = 1000
    capacity = min_after_dequeue + 300 * batch_size
    image_batch, label_batch = tf.train.shuffle_batch(
        [standardized, out_label],
        batch_size=batch_size,
        num_threads=1024,
        capacity=capacity,
        min_after_dequeue=min_after_dequeue)

    image_batch = tf.reshape(image_batch, [batch_size, image_W, image_H, 1])
    image_batch = tf.cast(image_batch, np.float32)

    return image_batch, label_batch

if __name__ == "__main__":
    # Smoke test: pull a few batches from the pipeline and display the
    # first image of each batch.
    file_dir='D:\\CT_data\\Data_preprocessing\\'  # input data directory
    image_list, label_list = get_files(file_dir)
    image_batch, label_batch = get_batch(image_list, label_list, 28)
    with tf.Session() as sess:
        # Initialize globals and locals (slice_input_producer's epoch
        # counter is a local variable).
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        shown = 0
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            # Early exit after 5 batches; the file queue itself only ends
            # after epoch_num epochs, so this cuts the run short.
            while not coord.should_stop() and shown < 5:
                img, label = sess.run([image_batch, label_batch])
                plt.imshow(img[0, :, :, 0])
                plt.show()
                shown += 1
        except tf.errors.OutOfRangeError:
            print('done!')
        finally:
            coord.request_stop()
            print('-----------')
        coord.join(threads)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
第三段代码 train_CT.py 用于训练整个 GAN
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 14:57:23 2018

@author: DidiLv
"""

# -*- coding: utf-8 -*-
"""
Created on Wed Jul 25 09:42:35 2018
E-mail: Eric2014_Lv@sjtu.edu.cn
@author: DidiLv
"""
import model_CT

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
#import random
import data_generate_CT

# Local directory holding the training samples.
file_dir='D:\\CT_data\\Data_preprocessing\\'

# Reset the default graph so repeated runs start from a clean variable set.
tf.reset_default_graph()
batch_size = 10
image_W = 221
image_H = 181
image_C = 1
z_dimensions = 4*3*64  # must match the generator's initial 4x3x64 reshape

image_list, label_list = data_generate_CT.get_files(file_dir)
image_batch, _ = data_generate_CT.get_batch(image_list, label_list, batch_size)

# Real images come straight from the queue pipeline (no feed_dict); the
# latent code is a variable sampled once at construction time.
# NOTE(review): np.random.normal(-1, 1, ...) is a normal with mean -1 and
# std 1 -- possibly U(-1, 1) was intended; kept as-is to preserve behavior.
x_placeholder = image_batch
z_placeholder = tf.Variable(np.random.normal(-1, 1, size=[batch_size, z_dimensions]),
                            dtype=tf.float32)

Dx = model_CT.discriminator(x_placeholder)                        # D logits on real data
Gz = model_CT.generator(z_placeholder, batch_size, z_dimensions)  # generated images
Dg = model_CT.discriminator(Gz, reuse=True)                       # D logits on fakes

# Generator pushes D toward 1 on fakes; discriminator is trained toward
# 1 on real data and 0 on fakes.
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.ones_like(Dg)))
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dx, labels=tf.ones_like(Dx)))
# Fix: the fake-label tensor must be shaped from Dg, the logits it pairs
# with (the original used tf.zeros_like(Dx) -- same shape here, but wrong
# tensor and a latent bug if the two shapes ever diverge).
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=Dg, labels=tf.zeros_like(Dg)))
d_loss = d_loss_real + d_loss_fake

# Split trainable variables between the two players by name prefix
# ('d_*' from the discriminator scope, 'g_*' from the generator scope).
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]

with tf.variable_scope(tf.get_variable_scope(), reuse=False):
    # var_list restricts each optimizer to its own network's variables.
    trainerD = tf.train.AdadeltaOptimizer(learning_rate=1e-3).minimize(d_loss, var_list=d_vars)
    trainerG = tf.train.AdadeltaOptimizer(learning_rate=1e-3).minimize(g_loss, var_list=g_vars)

iterations = 3000

with tf.Session() as sess:
    # Locals too: slice_input_producer's epoch counter is a local variable.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    i = 0
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        # Stop after `iterations` steps or when the input queue runs out of
        # epochs (epoch_num epochs in the data pipeline), whichever first.
        while not coord.should_stop() and i < iterations:
            print(i)
            _, dLoss = sess.run([trainerD, d_loss])  # update the discriminator
            _, gLoss = sess.run([trainerG, g_loss])  # update the generator
            print((dLoss + gLoss))
            i += 1
    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        coord.request_stop()
        print('-----------')
    coord.join(threads)

--------------------- 
作者:Eric2016_Lv 
来源:CSDN 
原文:https://blog.csdn.net/eric2016_lv/article/details/81239585 
版权声明:本文为博主原创文章,转载请附上博文链接!

用DCGAN训练并生成自己的图像集(含tensorflow代码)相关推荐

  1. 生成对抗网络简介(包含TensorFlow代码示例)【翻译】

    判别模型 vs. 生成模型 示例:近似一维高斯分布 提高样本多样性 最后的思考 关于GAN的一些讨论 最近,大家对生成模型的兴趣又开始出现(OpenAI关于生成模型的案例).生成模型可以学习如何生成数 ...

  2. 【图像融合】基于随机游走算法实现多焦点图像融合含Matlab代码

    1 内容介绍 近几年来,随机游走模型(random walk)与引导滤波器(guided filter)在图像处理领域受到了研究者们的广泛关注.前者已经被应用于图像处理的多种领域--图像融合.图像平滑 ...

  3. 有趣的图像生成——使用DCGAN与pytorch生成动漫头像

    有趣的图像生成--使用DCGAN与pytorch生成动漫头像 文章目录 有趣的图像生成--使用DCGAN与pytorch生成动漫头像 一.源码下载 二.什么是DCGAN 三.DCGAN的实现 1.** ...

  4. 单张图像就可以训练GAN!Adobe改良图像生成方法 | 已开源

    十三 发自 凹非寺 量子位 报道 | 公众号 QbitAI 数据集太小了,无法训练GAN?试试从单个图像入手吧. 去年谷歌就提出了SinGAN,是第一个拿GAN在单幅自然图像学习的非条件生成模型(IC ...

  5. DL之DCGAN:基于keras框架利用深度卷积对抗网络DCGAN算法对MNIST数据集实现图像生成

    DL之DCGAN:基于keras框架利用深度卷积对抗网络DCGAN算法对MNIST数据集实现图像生成 目录 基于keras框架利用深度卷积对抗网络DCGAN算法对MNIST数据集实现图像生成 设计思路 ...

  6. 基于对抗生成网络的图像转换技术【论文笔记】

    前两篇文章是对方法的改进,详细地论证了提出的方法的可行性和优越性.后两篇是应用型,针对一个特定的问题提出方法. Image-to-Image Translation with Conditional ...

  7. GAN综述及其在图像生成领域的应用(含原理、代码详解)

    本文将持续更新. 目录 1. 基本GAN 1.1 GAN(2014) 1.2 CGAN(2015) 1.3 DCGAN(2015) 1.4 VAE-GAN(2016) 1.5 ACGAN(2017) ...

  8. ai模型去除图像衍射光斑_GAN生成的假脸太逼真了!别怕,十招教你识别AI生成的假图像...

    你能看出上面两张图片,哪张是真的,哪张是假的么?文摘菌是分不出来,太像了! 这种以假乱真的图片生成技术来源于一种左右互博术-生成对抗网络(GAN). 这一概念由机器学习研究者Ian Goodfello ...

  9. ai模型去除图像衍射光斑_业界 | GAN生成的假脸太逼真了!别怕,十招教你识别AI生成的假图像...

    大数据文摘出品 编译:张秋玥.蒋宝尚 你能看出上面两张图片,哪张是真的,哪张是假的么?文摘菌是分不出来,太像了! 这种以假乱真的图片生成技术来源于一种左右互博术-生成对抗网络(GAN). 这一概念由机 ...

最新文章

  1. javascript进阶教程第一章案例实战
  2. 数据结构源码笔记(C语言):直接插入排序
  3. 1016 Phone Bills (25 分) 【未完成】【难度: 中 / 知识点: 模拟】
  4. 基于Boost::beast模块的协程HTTP服务器
  5. dubbo管理控制台安装和使用
  6. jdbc_servlet基础增删改分页2(userinfo表的)
  7. 百度地图 开发 乡镇级区域显示_Tableau导入乡镇级地图进行数据展示
  8. Gitlab 项目上传
  9. python 输出纯音频_Python如何录制系统音频(扬声器的输出)?
  10. 一个男人具备什么样的条件,才能结婚?
  11. layer模态窗简单使用
  12. 【less-4】sqli-labs靶场第四关
  13. 外贸必备——各国常用搜索引擎
  14. excel查找空值快捷键_Excel快捷键查询
  15. 零基础入行IC,选模拟版图还是数字后端?
  16. 如何在电脑中安装虚拟机?
  17. teamviewer 使用数量到达上限_Teamviewer免费版到达设备上限号就废了?
  18. Github每日精选(第77期):Go (Golang) 编写的 HTTP Web 框架gin
  19. 博客园上海俱乐部活动报道
  20. PHP使用imagick扩展合成透明GIF图帧重叠问题解决方案

热门文章

  1. python约瑟夫环问题给十个学生编号报到3者出列_趣味算法--约瑟夫环问题(示例代码)...
  2. 建立一个按年龄排序的有序链表,每个结点包括学号、姓名、性别、年龄。建立一个新的结点,通过年龄将此结点插入到链表中去,使之仍然有序
  3. 【c语言】数组逆序排列
  4. java postdelayed_你真的懂Handler.postDelayed()的原理吗?
  5. Modeling Filters and Whitening Filters
  6. iOS 开发中的多线程
  7. fastReport 随记
  8. 熟练Linux ,先从这 26 个命令开始吧
  9. 想写点什么留下点念想
  10. matlab ia模块,MathWorks发布MATLAB和SimulinkR2020a版本,为工程师和科学家提供更多AI功能...