Keras model visualization, layer visualization and kernel visualization examples
Author: xinfeng2005  Posted: 2021-02-20 00:45:25
Tags: keras, kernel, visualization
Keras model visualization:
Model definition:
from keras.models import Sequential
from keras.layers import (ZeroPadding2D, Conv2D, BatchNormalization, MaxPooling2D,
                          AveragePooling2D, Dropout, Flatten, Dense)

model = Sequential()
# input: 38x38 grayscale images -> (38, 38, 1) tensors.
# each block applies 3x3 convolution filters (32, 64, 128 in turn).
model.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
# model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
# model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
# model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(AveragePooling2D((5, 5)))
model.add(Flatten())
# model.add(Dense(512, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(label_size, activation='softmax'))  # label_size: number of output classes, defined elsewhere
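The structure of the model itself can also be rendered as a graph. A minimal sketch using Keras' built-in plot_model utility, assuming pydot and graphviz are installed (the output filename is arbitrary):

from keras.utils import plot_model

# Render the layer graph to an image file; show_shapes adds input/output shapes per layer.
plot_model(model, to_file='model.png', show_shapes=True)
# model.summary() prints the same structure as text.
model.summary()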
1. Layer visualization:
import cv2
import numpy as np
import matplotlib.pyplot as plt

test_x = []
img_src = cv2.imdecode(np.fromfile(r'c:\temp.tif', dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img_src, (38, 38), interpolation=cv2.INTER_CUBIC)
# img = np.random.randint(0, 255, (38, 38))
img = (255 - img) / 255
img = np.reshape(img, (38, 38, 1))
test_x.append(img)
###################################################################
# copy the trained weights of the first convolution layer into a one-layer model
layer = model.layers[1]
weight = layer.get_weights()  # list [kernel (3, 3, 1, 32), bias (32,)]
# print(weight)
print([w.shape for w in weight])
model_v1 = Sequential()
# same input as the full model: 38x38 grayscale -> (38, 38, 1) tensors
model_v1.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v1.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
# model_v1.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v1.layers[1].set_weights(weight)
re = model_v1.predict(np.array(test_x))
print(np.shape(re))
re = np.transpose(re, (0, 3, 1, 2))  # channels first, so re[0][i] is one feature map
for i in range(32):
    plt.subplot(4, 8, i + 1)
    plt.imshow(re[0][i])  # , cmap='gray'
plt.show()
##################################################################
model_v2 = Sequential()
# truncated copy of the full model up to the second convolution layer
model_v2.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v2.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
# model_v2.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v2.add(BatchNormalization())
model_v2.add(MaxPooling2D(pool_size=(2, 2)))
model_v2.add(Dropout(0.25))
model_v2.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
print(len(model_v2.layers))
# copy the trained weights of the two convolution layers (indices 1 and 5);
# note that the BatchNormalization layer keeps its default weights here
layer1 = model.layers[1]
weight1 = layer1.get_weights()
model_v2.layers[1].set_weights(weight1)
layer5 = model.layers[5]
weight5 = layer5.get_weights()
model_v2.layers[5].set_weights(weight5)
re2 = model_v2.predict(np.array(test_x))
re2 = np.transpose(re2, (0, 3, 1, 2))
for i in range(64):
    plt.subplot(8, 8, i + 1)
    plt.imshow(re2[0][i])  # , cmap='gray'
plt.show()
##################################################################
model_v3 = Sequential()
# truncated copy of the full model up to the third convolution layer
model_v3.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v3.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
# model_v3.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v3.add(BatchNormalization())
model_v3.add(MaxPooling2D(pool_size=(2, 2)))
model_v3.add(Dropout(0.25))
model_v3.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
# model_v3.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model_v3.add(BatchNormalization())
model_v3.add(MaxPooling2D(pool_size=(2, 2)))
model_v3.add(Dropout(0.25))
model_v3.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
print(len(model_v3.layers))
# copy the trained weights of the three convolution layers (indices 1, 5 and 9)
layer1 = model.layers[1]
weight1 = layer1.get_weights()
model_v3.layers[1].set_weights(weight1)
layer5 = model.layers[5]
weight5 = layer5.get_weights()
model_v3.layers[5].set_weights(weight5)
layer9 = model.layers[9]
weight9 = layer9.get_weights()
model_v3.layers[9].set_weights(weight9)
re3 = model_v3.predict(np.array(test_x))
re3 = np.transpose(re3, (0, 3, 1, 2))
# only the first 121 of the 128 feature maps fit in the 11x11 grid
for i in range(121):
    plt.subplot(11, 11, i + 1)
    plt.imshow(re3[0][i])  # , cmap='gray'
plt.show()
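Rebuilding a truncated Sequential and copying weights layer by layer works, but a lighter alternative is to wrap the already-trained model in a functional Model whose output is the desired intermediate layer, so no weights need to be copied at all. A minimal sketch (feature_model is a hypothetical name; layer index 5 and the 8x8 grid match the second-conv example above):

from keras.models import Model

# Reuse the trained model's graph directly; its weights stay attached.
feature_model = Model(inputs=model.input, outputs=model.layers[5].output)
features = feature_model.predict(np.array(test_x))  # shape: (1, H, W, channels)
features = np.transpose(features, (0, 3, 1, 2))      # channels first for plotting
for i in range(features.shape[1]):
    plt.subplot(8, 8, i + 1)
    plt.imshow(features[0][i])
plt.show()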
2. Kernel visualization:
def process(x):
    # clip the image to the displayable range [0, 1]
    res = np.clip(x, 0, 1)
    return res

def dprocessed(x):
    # derivative of process(): 1 inside [0, 1], 0 outside
    res = np.zeros_like(x)
    res += 1
    res[x < 0] = 0
    res[x > 1] = 0
    return res

def deprocess_image(x):
    # normalize the tensor, center it around 0.5 and convert to a displayable uint8 image
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x
from keras import backend as K

for i_kernal in range(64):
    input_img = model.input
    # maximize the mean activation of the i-th feature map of layer 5
    loss = K.mean(model.layers[5].output[:, :, :, i_kernal])
    # loss = K.mean(model.output[:, i_kernal])
    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_img)[0]
    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    # this function returns the loss and grads given the input picture
    iterate = K.function([input_img, K.learning_phase()], [loss, grads])
    # we start from a gray image with some noise
    np.random.seed(0)
    num_channels = 1
    img_height = img_width = 38
    input_img_data = (255 - np.random.randint(0, 255, (1, img_height, img_width, num_channels))) / 255.
    failed = False
    # run gradient ascent
    print('####################################', i_kernal + 1)
    loss_value_pre = 0
    for i in range(10000):
        # processed = process(input_img_data)
        # predictions = model.predict(input_img_data)
        loss_value, grads_value = iterate([input_img_data, 1])
        # grads_value *= dprocessed(input_img_data[0])
        if i % 1000 == 0:
            # print(' predictions: ', np.shape(predictions), np.argmax(predictions))
            print('Iteration %d/%d, loss: %f' % (i, 10000, loss_value))
            print('Mean grad: %f' % np.mean(grads_value))
            if all(np.abs(grads_val) < 0.000001 for grads_val in grads_value.flatten()):
                failed = True
                print('Failed')
                break
            # print('Image:\n%s' % str(input_img_data[0, 0, :, :]))
            if loss_value_pre != 0 and loss_value_pre > loss_value:
                break
            if loss_value_pre == 0:
                loss_value_pre = loss_value
            # if loss_value > 0.99:
            #     break
        input_img_data += grads_value * 1  # step size; use *1e-3 for a smaller step
    plt.subplot(8, 8, i_kernal + 1)
    # plt.imshow((process(input_img_data[0, :, :, 0]) * 255).astype('uint8'), cmap='Greys')
    img_re = deprocess_image(input_img_data[0])
    img_re = np.reshape(img_re, (38, 38))
    plt.imshow(img_re, cmap='Greys')
    # plt.show()
plt.show()
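The K.gradients / K.function calls above assume the TF1-style graph backend; under TensorFlow 2 with tf.keras in eager mode they are not available. A rough equivalent of one gradient-ascent step using tf.GradientTape, assuming the model was built with tf.keras (activation_model and ascent_step are hypothetical names; layer index 5 and the gradient normalization are kept from the example above):

import tensorflow as tf

# Model whose output is the feature map we want to maximize.
activation_model = tf.keras.Model(model.input, model.layers[5].output)

def ascent_step(img, kernel_index, step=1.0):
    """One gradient-ascent step on the input image for a single kernel."""
    img = tf.convert_to_tensor(img, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(img)
        activation = activation_model(img, training=False)
        loss = tf.reduce_mean(activation[:, :, :, kernel_index])
    grads = tape.gradient(loss, img)
    grads /= tf.sqrt(tf.reduce_mean(tf.square(grads))) + 1e-5  # same normalization trick
    return (img + step * grads).numpy(), float(loss)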
Source: https://blog.csdn.net/xinfeng2005/article/details/78697415

