7. EXPERIMENT 7 (NO DATASET) - SVM

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns

# Load the built-in Iris dataset
iris = load_iris()
x, y = iris.data, iris.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)

# SVM classifier with an RBF kernel
model = SVC(kernel='rbf', C=1.0, gamma='scale')
model.fit(x_train, y_train)
pred = model.predict(x_test)

print("\nPredictions:", pred)
print("\nAccuracy:", accuracy_score(y_test, pred))
print("\nConfusion Matrix:\n", confusion_matrix(y_test, pred))
print("\nClassification Report:\n", classification_report(y_test, pred))

# Plot confusion matrix
cm = confusion_matrix(y_test, pred)
plt.figure(figsize=(6, 4))
sns.heatmap(cm, annot=True, cmap="Blues", fmt="d",
            xticklabels=iris.target_names, yticklabels=iris.target_names)
plt.xlabel("Predicted Class")
plt.ylabel("Actual Class")
plt.title("Confusion Matrix")
plt.show()

# Scatter plot of the first two features, coloured by true class
plt.figure(figsize=(6, 4))
for i in range(len(iris.target_names)):
    plt.scatter(x[y == i, 0], x[y == i, 1], label=iris.target_names[i], alpha=0.7)
plt.title("Iris Classes (SVC, RBF Kernel)")
plt.xlabel("Sepal Length")
plt.ylabel("Sepal Width")
plt.legend()
plt.show()

Output:

Predictions: [0 1 1 0 2 1 2 0 0 2 1 0 2 1 1 0 1 1 0 0 1 1 2 0 2 1 0 0 1 2]

Accuracy: 0.9666666666666667

Confusion Matrix:
[[11  0  0]
 [ 0 12  1]
 [ 0  0  6]]

Classification Report:
              precision    recall  f1-score   support

           0       1.00      1.00      1.00        11
           1       1.00      0.92      0.96        13
           2       0.86      1.00      0.92         6

    accuracy                           0.97        30
   macro avg       0.95      0.97      0.96        30
weighted avg       0.97      0.97      0.97        30

8. EXPERIMENT 8 (CNN)

from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense

# Load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Reshape to (samples, 28, 28, 1) and normalize pixel values to [0, 1]
X_train = X_train.reshape(-1, 28, 28, 1) / 255.0
X_test = X_test.reshape(-1, 28, 28, 1) / 255.0

# Check shapes
print(X_train.shape)
print(X_test.shape)

# Build model: one convolution + pooling block, then a small dense classifier
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPool2D((2, 2)),
    Flatten(),
    Dense(100, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile model
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

# Train model
model.fit(X_train, y_train, epochs=10)

# Evaluate model
model.evaluate(X_test, y_test)
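Note: model.evaluate reports only the overall loss and accuracy. If individual test predictions are wanted, the softmax outputs can be converted to digit labels with argmax. The snippet below is a small optional sketch, not part of the recorded experiment; it assumes the trained model and the X_test / y_test arrays from Experiment 8 are still in scope.

import numpy as np

# Optional sketch (assumes `model`, `X_test`, `y_test` from Experiment 8 above).
# model.predict returns one 10-way probability vector per image; argmax picks the digit.
probs = model.predict(X_test[:5])
pred_digits = np.argmax(probs, axis=1)
print("Predicted:", pred_digits)
print("Actual:   ", y_test[:5])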
1. EXPERIMENT 1 (NB) - DIABETES

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns

# Load numeric dataset
file_path = r"diabetes.csv"
data = pd.read_csv(file_path)
print("Dataset Loaded:", data.shape)
print(data.head())

# Split into X and y
X = data.drop(columns=['Outcome'])
y = data['Outcome']

# Train-test split
X_train, X_test, Y_train, Y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Gaussian Naive Bayes
model = GaussianNB()
model.fit(X_train, Y_train)

# Prediction
pred = model.predict(X_test)

# Evaluation
print("\nAccuracy:", accuracy_score(Y_test, pred))
print("\nConfusion Matrix:\n", confusion_matrix(Y_test, pred))
print("\nClassification Report:\n", classification_report(Y_test, pred))

# Plot confusion matrix
cm = confusion_matrix(Y_test, pred)
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, cmap='Blues', fmt='d')
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.show()

Output:

Dataset Loaded: (768, 9)
   Pregnancies  Glucose  BloodPressure  SkinThickness  Insulin   BMI  \
0            6      148             72             35        0  33.6
1            1       85             66             29        0  26.6
2            8      183             64              0        0  23.3
3            1       89             66             23       94  28.1
4            0      137             40             35      168  43.1

   DiabetesPedigreeFunction  Age  Outcome
0                     0.627   50        1
1                     0.351   31        0
2                     0.672   32        1
3                     0.167   21        0
4                     2.288   33        1

Accuracy: 0.7662337662337663

Confusion Matrix:
[[79 20]
 [16 39]]

Classification Report:
              precision    recall  f1-score   support

2. EXPERIMENT 2 (NB) - NAIVE TEXT

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns

# Load text dataset
file_path = r"naivetext.csv"
data = pd.read_csv(file_path, names=['message', 'label'])
print("Dataset Loaded:", data.shape)
print(data.head())

# Convert labels to numbers (pos -> 1, neg -> 0)
data['labelnum'] = data['label'].map({'pos': 1, 'neg': 0})
X = data['message']
y = data['labelnum']

# Train-test split
X_train, X_test, Y_train, Y_test = train_test_split(
    X, y, test_size=0.25, random_state=42
)

# Convert text to a document-term count matrix
vectorizer = CountVectorizer()
X_train_dtm = vectorizer.fit_transform(X_train)
X_test_dtm = vectorizer.transform(X_test)

# Multinomial Naive Bayes
model = MultinomialNB()
model.fit(X_train_dtm, Y_train)

# Prediction
pred = model.predict(X_test_dtm)

# Evaluation
print("\nAccuracy:", accuracy_score(Y_test, pred))
print("\nConfusion Matrix:\n", confusion_matrix(Y_test, pred))
print("\nClassification Report:\n", classification_report(Y_test, pred))

# Plot confusion matrix
cm = confusion_matrix(Y_test, pred)
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, cmap='Blues', fmt='d')
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.show()

Output:

Dataset Loaded: (18, 2)
                               message label
0                 I love this sandwich   pos
1            This is an amazing place   pos
2  I feel very good about these beers   pos
3                 This is my best work   pos
4                 What an awesome view   pos

Accuracy: 0.6

Confusion Matrix:
[[2 0]
 [2 1]]

Classification Report:
              precision    recall  f1-score   support

           0       0.50      1.00      0.67         2
           1       1.00      0.33      0.50         3

    accuracy                           0.60         5
   macro avg       0.75      0.67      0.58         5
weighted avg       0.80      0.60      0.57         5
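The CountVectorizer step is what makes MultinomialNB applicable here: every message becomes a vector of word counts over the training vocabulary. The short sketch below is an illustration only, not part of the recorded output; it assumes the fitted vectorizer and model from Experiment 2 are still in scope, the sample message is made up, and get_feature_names_out requires a recent scikit-learn version.

# Illustration only (assumes `vectorizer` and `model` from Experiment 2 above).
print(vectorizer.get_feature_names_out()[:10])   # first few words in the learned vocabulary

new_msg = ["I love this place"]                  # hypothetical unseen message
new_dtm = vectorizer.transform(new_msg)          # counts over the same vocabulary
print(model.predict(new_dtm))                    # 1 = pos, 0 = neg
print(model.predict_proba(new_dtm))              # class probabilities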
3. EXPERIMENT 3 (NB) - HEART

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns

# Load medical dataset
file_path = r"heart.csv"
data = pd.read_csv(file_path)
print("Dataset Loaded:", data.shape)
print(data.head())

# Split into X and y
X = data.drop(columns=['target'])  # change to 'heartdisease' if the dataset uses that name
y = data['target']

# Train-test split
X_train, X_test, Y_train, Y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Gaussian Naive Bayes
model = GaussianNB()
model.fit(X_train, Y_train)

# Prediction
pred = model.predict(X_test)

# Evaluation
print("\nAccuracy:", accuracy_score(Y_test, pred))
print("\nConfusion Matrix:\n", confusion_matrix(Y_test, pred))
print("\nClassification Report:\n", classification_report(Y_test, pred))

# Plot confusion matrix
cm = confusion_matrix(Y_test, pred)
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, cmap='Blues', fmt='d')
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.title("Confusion Matrix")
plt.show()

Output:

Accuracy: 0.8

Confusion Matrix:
[[72 30]
 [11 92]]

Classification Report:
              precision    recall  f1-score   support

           0       0.87      0.71      0.78       102
           1       0.75      0.89      0.82       103

    accuracy                           0.80       205
   macro avg       0.81      0.80      0.80       205
weighted avg       0.81      0.80      0.80       205

6. EXPERIMENT 6 - TIPS

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the tips dataset
file_path = r"C:\Users\radzz\Downloads\tips.csv"
data = pd.read_csv(file_path)
bill = data["total_bill"].values
tip = data["tip"].values

# Design matrix with an intercept column; target is the tip
X = np.c_[np.ones_like(bill), bill]
y = tip
k = 0.2  # bandwidth of the Gaussian weighting kernel

# Locally weighted linear regression: a separate weighted least-squares
# line is fitted for every query point x0
def lwlr(X, y, k):
    preds = []
    for x0 in X:
        diff = X - x0
        w = np.exp(np.sum(diff**2, axis=1) / (-2 * k**2))   # Gaussian weights
        W = np.diag(w)
        beta = np.linalg.pinv(X.T @ W @ X) @ (X.T @ W @ y)  # weighted normal equations
        preds.append(x0 @ beta)
    return np.array(preds)

y_pred = lwlr(X, y, k)

# Sort by bill amount so the fitted curve plots left to right
idx = np.argsort(X[:, 1])
plt.scatter(bill, tip, color="green")
plt.plot(X[idx, 1], y_pred[idx], color="red", linewidth=5)
plt.xlabel("Total bill")
plt.ylabel("Tip")
plt.title("Locally Weighted Linear Regression on Tips Data")
plt.show()
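In lwlr each query point x0 gets its own weighted least-squares fit, with weights w_i = exp(-||x_i - x0||^2 / (2 k^2)), so the bandwidth k decides how local the fit is: a small k follows the data closely, a large k approaches an ordinary global line. The sketch below is an optional comparison, not part of the recorded experiment; it reuses bill, tip, X, y, idx and the lwlr function defined above, and the bandwidth values are arbitrary choices.

# Optional bandwidth comparison (reuses bill, tip, X, y, idx and lwlr from Experiment 6).
for k_val in (0.2, 1.0, 5.0):   # small k -> wiggly local fit, large k -> almost a straight line
    y_k = lwlr(X, y, k_val)
    plt.plot(X[idx, 1], y_k[idx], label=f"k = {k_val}")
plt.scatter(bill, tip, color="green", alpha=0.4)
plt.xlabel("Total bill")
plt.ylabel("Tip")
plt.legend()
plt.title("LWLR fits for different bandwidths")
plt.show()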
5. EXPERIMENT 5 - IRIS

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
import matplotlib.pyplot as plt
import seaborn as sns

# Load dataset
dataset = pd.read_csv(r"C:\Users\rashm\Downloads\Iris.csv")

# Features (all columns except last) and target (last column)
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0
)

# k-NN classifier with Euclidean distance (the Minkowski p parameter is ignored
# when metric='euclidean', so it is omitted here)
classifier = KNeighborsClassifier(n_neighbors=8, metric='euclidean')
classifier.fit(X_train, y_train)

# Predictions
y_pred = classifier.predict(X_test)

# Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
print("Confusion Matrix:\n", cm)

# Classification Report
print("\nAccuracy Metrics:")
print(classification_report(y_test, y_pred))

# Accuracy
acc = accuracy_score(y_test, y_pred)
print("Correct Predictions:", acc)
print("Wrong Predictions:", 1 - acc)

# Detailed predictions
print("\nDetailed Predictions:")
for i, (actual, predicted) in enumerate(zip(y_test, y_pred), start=1):
    status = "Correct" if actual == predicted else "Wrong"
    print(f"Sample {i}: Actual = {actual}, Predicted = {predicted} --> {status}")

# Plot confusion matrix
plt.figure(figsize=(6, 5))
sns.heatmap(
    cm, annot=True, fmt='d', cmap='Blues',
    xticklabels=classifier.classes_, yticklabels=classifier.classes_
)
plt.xlabel('Predicted Labels')
plt.ylabel('True Labels')
plt.title('Confusion Matrix - k-NN (k=8)')
plt.show()

Output:

Confusion Matrix:
[[13  0  0]
 [ 0 16  0]
 [ 0  0  9]]

Accuracy Metrics:
                 precision    recall  f1-score   support

    Iris-setosa       1.00      1.00      1.00        13
Iris-versicolor       1.00      1.00      1.00        16
 Iris-virginica       1.00      1.00      1.00         9

       accuracy                           1.00        38
      macro avg       1.00      1.00      1.00        38
   weighted avg       1.00      1.00      1.00        38

Correct Predictions: 1.0
Wrong Predictions: 0.0

Detailed Predictions:
Sample 1: Actual = Iris-virginica, Predicted = Iris-virginica --> Correct
Sample 2: Actual = Iris-versicolor, Predicted = Iris-versicolor --> Correct
Sample 3: Actual = Iris-setosa, Predicted = Iris-setosa --> Correct
Sample 4: Actual = Iris-virginica, Predicted = Iris-virginica --> Correct
Sample 5: Actual = Iris-setosa, Predicted = Iris-setosa --> Correct
Sample 6: Actual = Iris-virginica, Predicted = Iris-virginica --> Correct
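In Experiment 5 the value k = 8 was fixed by hand. A common refinement is to choose k by cross-validation; the sketch below is optional and not part of the recorded output, and it assumes the X and y arrays loaded in Experiment 5 are still available.

from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

# Optional: pick k by 5-fold cross-validation (assumes X, y from Experiment 5 above).
scores = {}
for k in range(1, 16):
    knn = KNeighborsClassifier(n_neighbors=k, metric='euclidean')
    scores[k] = cross_val_score(knn, X, y, cv=5).mean()

best_k = max(scores, key=scores.get)
print("Mean CV accuracy per k:", scores)
print("Best k:", best_k)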