Artificial Intelligence and Machine Learning
Name: Deepak Rajbhar    Seat No. 750477
IDOL, Mumbai University
Vidyavardhini's College Of Engineering & Technology

Practical No.1
Implementation of BFS and DFS algorithms using Python programming

A. Breadth-First Search

graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}

visited = []  # List for visited nodes
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # Function for BFS
    visited.append(node)
    queue.append(node)
    while queue:  # Loop to visit each node
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

# Driver code
print("Following is the Breadth-First Search")
bfs(visited, graph, '5')

Output
Following is the Breadth-First Search
5 3 7 2 4 8

B. Depth-First Search

graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}

visited = []  # List for visited nodes

def dfs(node):
    visited.append(node)
    print(node, end=" ")
    for neighbour in graph[node]:
        if neighbour not in visited:
            dfs(neighbour)

# Driver code
print("Following is the Depth-First Search")
dfs('5')

Output
Following is the Depth - First Search
5 3 2 4 8 7
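As a supplementary note (not part of the original listing), the same depth-first traversal can be written without recursion by managing an explicit stack. A minimal sketch, assuming the `graph` dictionary defined above; it reproduces the recursive output:

# Supplementary sketch: iterative DFS with an explicit stack.
# Assumes the `graph` dictionary from parts A/B above.
def dfs_iterative(graph, start):
    visited = []
    stack = [start]           # The stack replaces the recursive call stack
    while stack:
        node = stack.pop()    # LIFO order gives depth-first behaviour
        if node not in visited:
            visited.append(node)
            print(node, end=" ")
            # Push neighbours in reverse so they are expanded in listed order
            stack.extend(reversed(graph[node]))

dfs_iterative(graph, '5')     # Prints: 5 3 2 4 8 7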
Practical No.2
Implementation of Logic Programming using PROLOG - DFS for the Water Jug problem

% Rules for checking whether a state is valid or not
valid_state(State) :-
    nth0(0, State, X),
    nth0(1, State, Y),
    X >= 0, X =< 4,
    Y >= 0, Y =< 3.

% Rules for moving water between jugs
move(state(X, Y), state(4, Y)) :- X < 4.   % Fill jug X to the top
move(state(X, Y), state(X, 3)) :- Y < 3.   % Fill jug Y to the top
move(state(X, Y), state(0, Y)) :- X > 0.   % Empty jug X
move(state(X, Y), state(X, 0)) :- Y > 0.   % Empty jug Y
move(state(X, Y), state(NewX, NewY)) :-    % Pour water from X to Y
    X > 0, Y < 3,
    Diff is min(X, 3 - Y),
    NewY is Y + Diff,
    NewX is X - Diff.
move(state(X, Y), state(NewX, NewY)) :-    % Pour water from Y to X
    X < 4, Y > 0,
    Diff is min(Y, 4 - X),
    NewX is X + Diff,
    NewY is Y - Diff.

% Rules for depth-first search
dfs(State, _, Path, Path) :- goal(State).
dfs(State, Visited, Path, FinalPath) :-
    \+ member(State, Visited),
    move(State, NextState),
    dfs(NextState, [State | Visited], [NextState | Path], FinalPath).

% Goal state: 2 gallons of water in the 4-gallon jug
goal(state(2, _)).

water_jug_problem :-
    InitialState = state(0, 0),
    dfs(InitialState, [], [InitialState], Path),
    reverse(Path, ReversedPath),
    write('Solution Path: '),
    write(ReversedPath).

Output
Solution Path: [state(0,0), state(4,0), state(0,3), state(4,3), state(1,3), state(1,0), state(0,1), state(4,1), state(2,3), state(2,0)]

Practical No.3
Introduction to Python Libraries using Colaboratory: NumPy, Matplotlib, Pandas

A. NumPy

import numpy as np

arr = np.array([1, 2, 3, 4, 5])
mean_value = np.mean(arr)
sum_value = np.sum(arr)
print("NumPy Array:", arr)
print("Mean:", mean_value)
print("Sum:", sum_value)

Output
NumPy Array: [1 2 3 4 5]
Mean: 3.0
Sum: 15

B. Matplotlib

import os
import matplotlib.pyplot as plt
import numpy as np

os.environ['MPLCONFIGDIR'] = '/tmp/matplotlib'

x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
plt.title("Sine Function")
plt.xlabel("x")
plt.ylabel("sin(x)")
plt.show()

C. Pandas

import pandas as pd

data = {'Name': ['Alice', 'Bob', 'Charlie'],
        'Age': [25, 30, 35],
        'City': ['New York', 'San Francisco', 'Los Angeles']}
df = pd.DataFrame(data)
print("DataFrame:")
print(df)

Output
DataFrame:
      Name  Age           City
0    Alice   25       New York
1      Bob   30  San Francisco
2  Charlie   35    Los Angeles

Practical No.4
Implementation of Linear Regression

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

np.random.seed(42)
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

model = LinearRegression()
model.fit(X, y)

X_new = np.array([[0], [2]])
y_pred = model.predict(X_new)

plt.scatter(X, y, label='Original data')
plt.plot(X_new, y_pred, color='red', linewidth=3, label='Linear Regression')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.title('Linear Regression Example')
plt.show()

Output
(Scatter plot of the generated data with the fitted regression line in red.)

Practical No.5
Implementation of Logistic Regression

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix

np.random.seed(42)
X = 2 * np.random.rand(100, 1)
y = (4 + 3 * X + np.random.randn(100, 1)) > 6  # Binary classification problem
y = y.ravel()

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')
print('Confusion Matrix:')
print(conf_matrix)

plt.scatter(X_test, y_test, color='black', label='Actual')
plt.scatter(X_test, y_pred, color='red', marker='x', label='Predicted')
plt.xlabel('X')
plt.ylabel('Class (0 or 1)')
plt.title('Logistic Regression Example')
plt.legend()
plt.show()

Output
(Printed accuracy and confusion matrix, followed by a scatter plot of actual vs. predicted classes.)
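As a hedged extension (not in the original practical), LogisticRegression also exposes the class probabilities behind these labels via predict_proba, which makes the default 0.5 decision threshold explicit. A minimal sketch, reusing `model` and `X_test` from the listing above:

# Supplementary sketch: probabilities behind the predicted labels.
# Reuses `model` and `X_test` from the Practical No.5 listing.
import numpy as np

proba = model.predict_proba(X_test)    # Column 1 holds P(class 1 | x)
order = np.argsort(X_test.ravel())     # Sort samples by X for readability
for xi, p in zip(X_test.ravel()[order], proba[order, 1]):
    label = 1 if p >= 0.5 else 0       # Default decision threshold
    print(f"x = {xi:.2f}  P(class 1) = {p:.3f}  ->  class {label}")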
Practical No.6
Implementation of Dimensionality Reduction technique

import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris

iris = load_iris()
X = iris.data
y = iris.target

pca = PCA(n_components=2)
X_pca = pca.fit_transform(X)

plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis', edgecolor='k', s=60)
plt.title('Original Data')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')

plt.subplot(1, 2, 2)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, cmap='viridis', edgecolor='k', s=60)
plt.title('Data after PCA')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')

plt.tight_layout()
plt.show()

Output
(Side-by-side scatter plots: the first two original iris features, and the data projected onto two principal components.)

Practical No.7
Implementation of K-Means Algorithm

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

# Generate random data for clustering
X, y = make_blobs(n_samples=300, centers=4, random_state=42, cluster_std=0.60)

# Apply K-Means clustering with k=4 and set n_init explicitly
kmeans = KMeans(n_clusters=4, n_init=10)
kmeans.fit(X)

# Get cluster centers and labels
centers = kmeans.cluster_centers_
labels = kmeans.labels_

# Plot the original data and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', edgecolor='k', s=50, alpha=0.7)
plt.scatter(centers[:, 0], centers[:, 1], c='red', marker='X', s=200, label='Centroids')
plt.title('K-Means Clustering')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()
plt.show()

Practical No.8
Implementation of Support Vector Machine as a Classification algorithm

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, confusion_matrix

iris = datasets.load_iris()
X = iris.data[:, :2]  # Take only the first two features for visualization purposes
y = iris.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

svm_classifier = SVC(kernel='linear', C=1)
svm_classifier.fit(X_train, y_train)
y_pred = svm_classifier.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')
print('Confusion Matrix:')
print(conf_matrix)

def plot_decision_boundary(X, y, model, title):
    h = .02  # Step size in the mesh
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm, edgecolors='k')
    plt.title(title)
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.show()

plot_decision_boundary(X_test, y_test, svm_classifier, 'SVM Classifier Decision Boundary')

Output
(Printed accuracy and confusion matrix, followed by the decision-boundary plot.)

Practical No.9
Implementation of Bagging Algorithm: Decision Tree

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

data = datasets.load_wine(as_frame=True)
X = data.data
y = data.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=22)

dtree = DecisionTreeClassifier(random_state=22)  # Base learner for bagging
dtree.fit(X_train, y_train)
y_pred = dtree.predict(X_test)

print("Train data accuracy:", accuracy_score(y_true=y_train, y_pred=dtree.predict(X_train)))
print("Test data accuracy:", accuracy_score(y_true=y_test, y_pred=y_pred))

Output
Train data accuracy: 1.0
Test data accuracy: 0.8222222222222222
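The listing above trains only the single decision tree that serves as the base learner, and its perfect train accuracy against the much lower test accuracy shows the overfitting that bagging is meant to reduce. A hedged sketch of the bagging step itself, assuming the same train/test split (the n_estimators value is illustrative, not from the original):

# Supplementary sketch: bagging the decision tree from Practical No.9.
# Assumes X_train, X_test, y_train, y_test from the listing above;
# n_estimators=100 is an illustrative choice.
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

bagging = BaggingClassifier(
    estimator=DecisionTreeClassifier(random_state=22),  # Base learner
    # Note: 'estimator' is named 'base_estimator' in scikit-learn < 1.2
    n_estimators=100,  # Number of trees, each fit on a bootstrap sample
    random_state=22,
)
bagging.fit(X_train, y_train)
print("Bagging test accuracy:", accuracy_score(y_test, bagging.predict(X_test)))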
Practical No.10
Implementation of Boosting Algorithm: Gradient Boosting

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score, confusion_matrix

data = load_wine()
X = data.data
y = data.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)

gradient_boosting_classifier = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, random_state=42)
gradient_boosting_classifier.fit(X_train, y_train)
y_pred = gradient_boosting_classifier.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
print(f'Accuracy: {accuracy * 100:.2f}%')
print('Confusion Matrix:')
print(conf_matrix)

feature_importances = gradient_boosting_classifier.feature_importances_
sorted_idx = np.argsort(feature_importances)
column_names = data.feature_names if hasattr(data, 'feature_names') else [f'Feature {i}' for i in range(X.shape[1])]

plt.barh(range(X.shape[1]), feature_importances[sorted_idx])
plt.yticks(range(X.shape[1]), [column_names[i] for i in sorted_idx])
plt.xlabel('Feature Importance')
plt.title('Gradient Boosting Classifier - Feature Importances')
plt.show()

Output
(Printed accuracy and confusion matrix, followed by a horizontal bar chart of feature importances.)
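As a closing, hedged extension (not part of the original listing), GradientBoostingClassifier can report predictions after every boosting stage via staged_predict, which shows how test accuracy evolves as trees are added. A minimal sketch, reusing the fitted classifier and test split from above:

# Supplementary sketch: test accuracy after each boosting stage.
# Reuses `gradient_boosting_classifier`, `X_test`, `y_test` from above.
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score

stage_accuracy = [
    accuracy_score(y_test, stage_pred)
    for stage_pred in gradient_boosting_classifier.staged_predict(X_test)
]
plt.plot(range(1, len(stage_accuracy) + 1), stage_accuracy)
plt.xlabel('Number of boosting stages')
plt.ylabel('Test accuracy')
plt.title('Gradient Boosting - Test Accuracy vs. Stages')
plt.show()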