7. Write a Python program to implement image segmentation using RGB to HSV conversion.

Code:

# Importing necessary libraries
from skimage import data
from skimage.color import rgb2hsv
import matplotlib.pyplot as plt

# Setting the plot size to 15, 15
plt.figure(figsize=(15, 15))

# Sample image from the scikit-image package
coffee = data.coffee()
plt.subplot(1, 2, 1)

# Displaying the sample image
plt.imshow(coffee)

# Converting the RGB image to HSV
hsv_coffee = rgb2hsv(coffee)
plt.subplot(1, 2, 2)

# Displaying the image in HSV format
hsv_coffee_colorbar = plt.imshow(hsv_coffee)

# Adjusting the colorbar to fit the size of the image
plt.colorbar(hsv_coffee_colorbar, fraction=0.046, pad=0.04)

Output:

8. Write a Python program to implement image segmentation using thresholding.

Code:

# Importing necessary libraries
from skimage import data
from skimage import filters
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

# Sample image from the scikit-image package,
# converted to monochrome
coffee = data.coffee()
gray_coffee = rgb2gray(coffee)

# Setting the plot size to 15, 15
plt.figure(figsize=(15, 15))

# Iterating over different thresholds
for i in range(10):
    binarized_gray = (gray_coffee > i * 0.1) * 1
    plt.subplot(5, 2, i + 1)

    # Rounding the threshold value to 1 decimal point
    plt.title("Threshold: >" + str(round(i * 0.1, 1)))

    # Displaying the binarized image at each threshold
    plt.imshow(binarized_gray, cmap='gray')

plt.tight_layout()

Output:

(Note: neither Program 7 nor Program 8 produces an explicit segmentation mask, and the `filters` import above goes unused; see the sketch after Program 9.)

9. Write a Python program for text classification using the Naïve Bayes algorithm.

Code:

print('\n *-----* Classification using Naïve Bayes *-----* \n')
total_documents = int(input("Enter the total number of documents: "))
doc_class = []
i = 0
keywords = []

# Reading each document's text and class label
while not i == total_documents:
    doc_class.append([])
    text = input(f"\nEnter the text of Doc-{i+1}: ").lower()
    clas = input(f"Enter the class of Doc-{i+1}: ")
    doc_class[i].append(text.split())
    doc_class[i].append(clas)
    keywords.extend(text.split())
    i = i + 1

# Building a sorted vocabulary of unique words
keywords = set(keywords)
keywords = list(keywords)
keywords.sort()

to_find = input("\nEnter the text to classify using Naive Bayes: ").lower().split()

# Initializing a documents-by-keywords count table
probability_table = []
for i in range(total_documents):
    probability_table.append([])
    for j in keywords:
        probability_table[i].append(0)

# Counting each keyword's occurrences per document
for i in range(total_documents):
    for k in range(len(keywords)):
        if keywords[k] in doc_class[i][0]:
            probability_table[i][k] += doc_class[i][0].count(keywords[k])

print('\n')

Output:

*-----* Classification using Naïve Bayes *-----*

Enter the total number of documents: 3

Enter the text of Doc-1: I watched the movie.
Enter the class of Doc-1: +

Enter the text of Doc-2: I hated the movie.
Enter the class of Doc-2: -

Enter the text of Doc-3: poor acting.
Enter the class of Doc-3: +

Enter the text to classify using Naive Bayes: I hated the acting.
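Program 7 stops at the colour-space conversion and Program 8 sweeps ten fixed thresholds without using its `filters` import, so neither listing actually extracts a segmentation mask. A minimal sketch of that missing step is below, assuming the same coffee image; the 0.6 saturation cut-off is an arbitrary illustrative value, while `filters.threshold_otsu` picks the grayscale threshold automatically.

# A sketch completing the segmentation Programs 7 and 8 aim at
# (the 0.6 saturation cut-off is an illustrative assumption)
from skimage import data, filters
from skimage.color import rgb2hsv, rgb2gray
import matplotlib.pyplot as plt

coffee = data.coffee()

# HSV-based segmentation: threshold one channel, e.g. keep
# the highly saturated pixels
hsv_coffee = rgb2hsv(coffee)
saturation_mask = hsv_coffee[:, :, 1] > 0.6

# Otsu's method chooses the binarization threshold automatically,
# instead of sweeping fixed values as in Program 8
gray_coffee = rgb2gray(coffee)
otsu_threshold = filters.threshold_otsu(gray_coffee)
otsu_mask = gray_coffee > otsu_threshold

# Displaying the two resulting masks side by side
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(saturation_mask, cmap='gray')
axes[0].set_title('Saturation > 0.6')
axes[1].imshow(otsu_mask, cmap='gray')
axes[1].set_title(f'Otsu threshold: {otsu_threshold:.2f}')
plt.show()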
10. Write a Python program to display document probabilities (continuing Program 9).

Code:

import prettytable

# Adding display-only columns to the keyword list
keywords.insert(0, 'Document ID')
keywords.append("Class")

# Building a pretty-printed table of per-document word counts
Prob_Table = prettytable.PrettyTable()
Prob_Table.field_names = keywords
Prob_Table.title = 'Probability of Documents'

x = 0
for i in probability_table:
    i.insert(0, x + 1)
    i.append(doc_class[x][1])
    Prob_Table.add_row(i)
    x = x + 1

print(Prob_Table)
print('\n')

# Removing the Document ID column again
for i in probability_table:
    i.pop(0)

# Tallying word and document totals per class
totalpluswords = 0
totalnegwords = 0
totalplus = 0
totalneg = 0
vocabulary = len(keywords) - 2

for i in probability_table:
    if i[len(i) - 1] == "+":
        totalplus += 1
        totalpluswords += sum(i[0:len(i) - 1])
    else:
        totalneg += 1
        totalnegwords += sum(i[0:len(i) - 1])

# Restoring the original keyword list
keywords.pop(0)
keywords.pop(len(keywords) - 1)

Output:

+-----------------------------------------------------------------+
|                    Probability of Documents                     |
+-------------+---------+-------+---+--------+------+-----+---------+-------+
| Document ID | acting. | hated | i | movie. | poor | the | watched | Class |
+-------------+---------+-------+---+--------+------+-----+---------+-------+
|      1      |    0    |   0   | 1 |   1    |  0   |  1  |    1    |   +   |
|      2      |    0    |   1   | 1 |   1    |  0   |  1  |    0    |   -   |
|      3      |    1    |   0   | 0 |   0    |  1   |  0  |    0    |   +   |
+-------------+---------+-------+---+--------+------+-----+---------+-------+

(Note: the posterior computation these totals feed is sketched at the end of this document, after Program 12.)

11. Write a Python program to implement sentiment analysis using a vanilla RNN.

(Note: this listing assumes the Keras imports and IMDB data preparation sketched at the end of this document.)

Code:

# Fixing every word's embedding size to be 32
embd_len = 32

# Creating an RNN model
RNN_model = Sequential(name="Simple_RNN")
RNN_model.add(Embedding(vocab_size, embd_len, input_length=max_words))

# For a stacked RNN (more than one RNN layer),
# use return_sequences=True on all but the last layer
RNN_model.add(SimpleRNN(128, activation='tanh', return_sequences=False))
RNN_model.add(Dense(1, activation='sigmoid'))

# Printing the model summary
print(RNN_model.summary())

# Compiling the model
RNN_model.compile(
    loss="binary_crossentropy",
    optimizer='adam',
    metrics=['accuracy']
)

# Training the model
history = RNN_model.fit(x_train_, y_train_,
                        batch_size=64,
                        epochs=5,
                        verbose=1,
                        validation_data=(x_valid, y_valid))

# Printing the model score on the test data
print()
print("Simple_RNN Score ---> ", RNN_model.evaluate(x_test, y_test, verbose=0))

Output:

12. Write a Python program to implement sequential information extraction using an LSTM.

(Note: this listing reuses the same setup as Program 11.)

Code:

# Defining the LSTM model
lstm_model = Sequential(name="LSTM_Model")
lstm_model.add(Embedding(vocab_size, embd_len, input_length=max_words))
lstm_model.add(LSTM(128, activation='relu', return_sequences=False))
lstm_model.add(Dense(1, activation='sigmoid'))

# Printing the model summary
print(lstm_model.summary())

# Compiling the model
lstm_model.compile(
    loss="binary_crossentropy",
    optimizer='adam',
    metrics=['accuracy']
)

# Training the model
history3 = lstm_model.fit(x_train_, y_train_,
                          batch_size=64,
                          epochs=5,
                          verbose=2,
                          validation_data=(x_valid, y_valid))

# Displaying the model accuracy on the test data
print()
print("LSTM model Score ---> ", lstm_model.evaluate(x_test, y_test, verbose=0))

Output:
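Programs 9 and 10 build the count table and per-class totals but stop before the actual Naïve Bayes scoring. A minimal sketch of that missing step is below, reusing the variables those programs define (total_documents, doc_class, keywords, probability_table, to_find, totalplus, totalneg, totalpluswords, totalnegwords, vocabulary); the add-one (Laplace) smoothing is an added assumption, not taken from the source listings.

# A sketch of the final Naive Bayes step (assumes the variables built
# in Programs 9 and 10; Laplace smoothing is an added assumption)

# Prior probability of each class, estimated from the document counts
p_plus = totalplus / total_documents
p_neg = totalneg / total_documents

# Multiplying in the likelihood of each query word under each class,
# with add-one (Laplace) smoothing over the vocabulary
for word in to_find:
    if word not in keywords:
        continue  # words never seen in training are skipped
    k = keywords.index(word)
    plus_count = sum(probability_table[i][k]
                     for i in range(total_documents) if doc_class[i][1] == '+')
    neg_count = sum(probability_table[i][k]
                    for i in range(total_documents) if doc_class[i][1] == '-')
    p_plus *= (plus_count + 1) / (totalpluswords + vocabulary)
    p_neg *= (neg_count + 1) / (totalnegwords + vocabulary)

print("P(+ | text) proportional to:", p_plus)
print("P(- | text) proportional to:", p_neg)
print("Predicted class:", '+' if p_plus >= p_neg else '-')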
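Programs 11 and 12 never define their imports or the data they train on (vocab_size, max_words, x_train_, y_train_, x_valid, y_valid, x_test, y_test). A minimal preparation sketch is below, assuming the standard Keras IMDB review dataset is intended; the vocabulary size, sequence length, and validation split are illustrative assumptions.

# A sketch of the setup Programs 11 and 12 rely on (the IMDB dataset,
# vocabulary size, sequence length and split are assumptions)
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, LSTM, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences

vocab_size = 5000   # keep only the 5000 most frequent words
max_words = 400     # pad/truncate every review to 400 tokens

# Loading the IMDB sentiment dataset, already encoded as word indices
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)

# Padding all reviews to a fixed length
x_train = pad_sequences(x_train, maxlen=max_words)
x_test = pad_sequences(x_test, maxlen=max_words)

# Holding out the last 10% of the training set for validation
split = int(len(x_train) * 0.9)
x_train_, y_train_ = x_train[:split], y_train[:split]
x_valid, y_valid = x_train[split:], y_train[split:]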