How to convert Python code for classifying ultrasound images of fatty liver and normal liver into MATLAB code
I am using an ultrasound image dataset to classify normal liver vs. fatty liver. I have a total of 550 images: 333 abnormal and 162 normal images that I use for training and validation, and the remaining 55 images (18 normal and 37 abnormal) for testing. Below I have attached my Python code for the two-class classification. Here is the link to the dataset: https://github.com/humedeg/amid
import PIL
print('Pillow Version:', PIL.__version__)
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
import cv2
from google.colab import drive
drive.mount('/content/drive')
train_dataset='/content/drive/MyDrive/prepocessed_US_image/train_N vs F'
test_dataset='/content/drive/MyDrive/prepocessed_US_image/test N vs F'
validation_dataset='/content/drive/MyDrive/prepocessed_US_image/val N vs F'
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Flatten, BatchNormalization, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
import itertools
import os
import shutil
import random
import glob
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
%matplotlib inline
# InceptionV3 expects 299x299 inputs and its own preprocessing function,
# so use inception_v3.preprocess_input (not the VGG16 one) and target_size=(299,299).
train_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.inception_v3.preprocess_input) \
    .flow_from_directory(directory=train_dataset, target_size=(299,299), classes=['train_abnormal', 'train_normal'], batch_size=10)
valid_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.inception_v3.preprocess_input) \
    .flow_from_directory(directory=validation_dataset, target_size=(299,299), classes=['val_abnormal', 'val_normal'], batch_size=10)
test_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.inception_v3.preprocess_input) \
    .flow_from_directory(directory=test_dataset, target_size=(299,299), classes=['test_abnormal', 'test_normal'], batch_size=10, shuffle=False)
imgs, labels = next(train_batches)
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 10, figsize=(20,20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()
plotImages(imgs)
print(labels)
# Use the tensorflow.keras namespaces; the old keras.layers.core /
# keras.layers.convolutional paths and keras.wrappers.scikit_learn (unused here)
# are no longer available in recent TensorFlow/Keras releases.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, MaxPooling2D
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from tensorflow.keras import backend as K
datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# No datagen.fit() call is needed: fit() is only for featurewise statistics
# (not used here) and expects a NumPy array, not a DirectoryIterator.
import keras.utils as image
train_img = imgs[0]                              # sample image from the training batch loaded above
img = image.img_to_array(train_img)              # convert image to a NumPy array
img = img.reshape((1,) + img.shape)              # add a batch dimension
i = 0
for batch in datagen.flow(img, batch_size=1):    # this loop runs forever until we break
    plt.figure(i)
    plot = plt.imshow(image.img_to_array(batch[0]))
    i += 1
    if i > 4:                                    # stop after 5 augmented images
        break
plt.show()
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, GlobalAveragePooling2D
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
IMG_SHAPE = (299, 299, 3)
inc_model = InceptionV3(weights='imagenet',
                        include_top=False,
                        input_shape=IMG_SHAPE)
for layer in inc_model.layers:
    layer.trainable = False
print("number of layers:", len(inc_model.layers))
inc_model.summary()
# All layers of the InceptionV3 base are frozen above (layers are trainable by default),
# so only the custom classification head below is trained.
# Adding custom layers
x = inc_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.2)(x)
x = Dense(512, activation="relu")(x)
predictions = Dense(2, activation="softmax")(x)
print(x)
from tensorflow.keras.models import Model
# creating the final model
model_ = Model(inputs=inc_model.input, outputs=predictions)
# Optionally, freeze only the first layers instead of the whole base:
# for layer in model_.layers[:52]:
#     layer.trainable = False
num_classes = 2
model_.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model_.fit(x=train_batches,
                     steps_per_epoch=len(train_batches),
                     validation_data=valid_batches,
                     validation_steps=len(valid_batches),
                     epochs=10,
                     verbose=2)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
test_dataset='/content/drive/MyDrive/prepocessed_US_image/test N vs F'
test_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.inception_v3.preprocess_input) \
    .flow_from_directory(directory=test_dataset, target_size=(299,299), classes=['test_abnormal', 'test_normal'], batch_size=10, shuffle=False)
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 10, figsize=(20,20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
        ax.axis('off')
    plt.tight_layout()
    plt.show()
test_imgs, test_labels = next(test_batches)
plotImages(test_imgs)
print(test_labels)
test_batches.classes
predictions = model_.predict(x=test_batches,verbose=0)
np.round(predictions)
from sklearn.metrics import confusion_matrix
import itertools
cm = confusion_matrix(y_true=test_batches.classes, y_pred=np.argmax(predictions, axis=-1))
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
test_batches.class_indices
cm_plot_labels = ['abnormal','normal']
plot_confusion_matrix(cm=cm, classes=cm_plot_labels, title='Confusion Matrix')
plt.ylabel('true label')
plt.xlabel('predicted label')
Answers (2)
Sivylla Paraskevopoulou
on 9 Feb 2023
There is no MATLAB tool that automatically translates Python code to MATLAB code. But you can use the importTensorFlowNetwork function to import your trained TensorFlow model, after you save it in SavedModel format. And you can perform the entire above workflow in MATLAB.
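To make that concrete, here is a minimal sketch of the import step. It assumes the trained Keras model above has been saved from Python with model_.save('fatty_liver_savedmodel') (TensorFlow SavedModel format), that the saved folder is visible to MATLAB, and that Deep Learning Toolbox plus the Deep Learning Toolbox Converter for TensorFlow Models support package are installed; the folder and file names below are placeholders.
% Import the TensorFlow SavedModel folder (placeholder name)
net = importTensorFlowNetwork('fatty_liver_savedmodel', ...
    'OutputLayerType', 'classification', ...
    'Classes', ["abnormal" "normal"]);
% Classify one test image (placeholder file name)
inputSize = net.Layers(1).InputSize;     % e.g. 299-by-299-by-3
I = imread('test_abnormal_001.png');
if size(I, 3) == 1
    I = cat(3, I, I, I);                 % replicate grayscale to 3 channels
end
I = imresize(I, inputSize(1:2));
I = single(I)/127.5 - 1;                 % mirror tf.keras inception_v3 preprocessing
label = classify(net, I)
Note that the Python-side preprocess_input scaling is not stored in the SavedModel, so it has to be reproduced in MATLAB before calling classify. Alternatively, the whole transfer-learning workflow can be rebuilt natively with imageDatastore, the pretrained inceptionv3 network, and trainNetwork.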
TianHao
on 10 Apr 2023
I found that the images in the dataset are identical to each other. Is there an error in the dataset?
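One quick way to check for exact duplicates in MATLAB is sketched below; the folder path is a placeholder, and the pairwise comparison only flags bit-identical images, not near-duplicates.
% Report exact pixel-level duplicates in an image folder (placeholder path)
imds = imageDatastore('path_to_ultrasound_images', 'IncludeSubfolders', true);
imgs = readall(imds);                    % cell array with one matrix per image
n = numel(imgs);
for i = 1:n-1
    for j = i+1:n
        if isequal(imgs{i}, imgs{j})
            fprintf('Duplicate: %s and %s\n', imds.Files{i}, imds.Files{j});
        end
    end
end
The pairwise loop is O(n^2), which is acceptable for a few hundred images.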