3

I am working on sklearn multinomial naive bayes classifier to classify the 20NewsGroup data. The code is as follows:

import numpy as np
import operator
from sklearn import datasets, naive_bayes, metrics, feature_extraction

# Fetch the 20 Newsgroups train/test splits. Headers, footers and quoted
# replies are stripped so the classifier cannot learn from metadata leaks.
data_train = datasets.fetch_20newsgroups(subset = 'train', shuffle = True,  random_state = 2016, remove = ('headers', 'footers', 'quotes'))
data_test = datasets.fetch_20newsgroups(subset = 'test', shuffle = True, random_state = 2016, remove = ('headers', 'footers', 'quotes'))
categories = data_train.target_names

def _coarse_category(name):
    """Map a fine-grained newsgroup name to one of 6 coarse category ids.

    0=comp.*, 1=rec.*, 2=sci.*, 3=misc.forsale, 4=talk.politics.*,
    5=everything else (alt.atheism, soc.religion.*, talk.religion.*).
    """
    if 'comp.' in name:
        return 0
    if 'rec.' in name:
        return 1
    if 'sci.' in name:
        return 2
    if 'misc.forsale' in name:
        return 3
    if 'talk.politics' in name:
        return 4
    return 5

# original-label index -> coarse-category id
target_map = {i: _coarse_category(name) for i, name in enumerate(categories)}

# Collapse the original 20 fine-grained labels into the 6 coarse categories.
# Comprehensions replace the append-loops; the throwaway y_temp is dropped.
y_train = [target_map[y] for y in data_train.target]
y_test = [target_map[y] for y in data_test.target]

# Bag-of-words features: keep terms appearing in >=1% and <=50% of documents.
count_vectorizer = feature_extraction.text.CountVectorizer(min_df = 0.01, max_df = 0.5, stop_words = 'english')
x_train = count_vectorizer.fit_transform(data_train.data)
# Only transform the test set — the vocabulary must come from training data.
x_test = count_vectorizer.transform(data_test.data)

# get_feature_names() was deprecated in scikit-learn 1.0 and removed in 1.2;
# fall back to get_feature_names_out() when the old API is gone.
try:
    feature_names = count_vectorizer.get_feature_names()
except AttributeError:
    feature_names = list(count_vectorizer.get_feature_names_out())

mnb_alpha_001  = naive_bayes.MultinomialNB(alpha = 0.01)

mnb_alpha_001.fit(x_train, y_train)

y_pred_001  = mnb_alpha_001.predict(x_test)

print('Accuracy Of MNB With Alpha = 0.01  : ', metrics.accuracy_score(y_test,y_pred_001))

The above code works fine for classification. Further, I want to list the 10 most distinguishable words in each category (Category 0 – Category 5), i.e. the words that separate that category from the other categories.

If I only have 2 Category(Category 0 - Category 1), I could use the feature_log_prob_ to compare the log probability as follows:

# Per-word log-probability ratio log P(w|c1) - log P(w|c0): large positive
# values mark words far more likely under category 1 than category 0.
diff = mnb_alpha_001.feature_log_prob_[1,:] - mnb_alpha_001.feature_log_prob_[0,:]
# Top-10 by descending ratio via argsort — avoids building a dict of all
# features and sorting it just to read the first ten entries.
for idx in np.argsort(diff)[::-1][:10]:
    print((feature_names[idx], diff[idx]))

The above code will list the 10 most distinguishable words from category 1 which differentiate it from category 0. The problem is that I could not do just a simple subtraction of the log probability if I have more than 2 categories.

I would need your expert advice on how to perform this task so that I get the 10 most distinguishable words in each category.

Thank you very much.

piman314
  • 5,285
  • 23
  • 35
Aloyz
  • 31
  • 3

1 Answer

1
# --- Model selection: try several smoothing strengths, keep the best ---
rr = [0.001, 0.01, 0.1, 1, 10]
acc = []
for alpha in rr:
    # BUG FIX: the original passed the loop index (0..4) as alpha while
    # printing rr[alp] — it never actually trained with the listed alphas.
    mnb = naive_bayes.MultinomialNB(alpha = alpha)
    mnb.fit(x_train, y_train)
    y_pred = mnb.predict(x_test)
    score = metrics.accuracy_score(y_test, y_pred)
    print('accuracy of Multinomial Naive Bayes for alpha ', alpha, '=', score)
    acc.append(score)

import operator
pos, m = max(enumerate(acc), key=operator.itemgetter(1))
print("Max accuracy=", m, " for alpha=", rr[pos])

# --- Fit ONCE with the best alpha (the model does not depend on ss), then
# --- rank the most distinguishing words for each coarse category ---
mnb = naive_bayes.MultinomialNB(alpha = rr[pos])
mnb.fit(x_train, y_train)

feature_names = count_vectorizer.get_feature_names()
for ss in range(6):
    # BUG FIX: the original subtracted np.max(...[-ss:]) — a single scalar
    # over a wrong slice (for ss=0 the slice is the whole matrix). What we
    # want is, per word, the best log-probability among the OTHER categories,
    # so a word scores high only if category ss likes it more than every
    # competing category does.
    others = np.delete(mnb.feature_log_prob_, ss, axis=0)
    diff = mnb.feature_log_prob_[ss, :] - others.max(axis=0)
    # Top 10 words most over-represented in category ss. argsort replaces the
    # dict + sorted() that the original rebuilt inside the feature loop
    # (quadratic) and that crashed on the undefined name `op`.
    for idx in np.argsort(diff)[::-1][:10]:
        print(ss, (feature_names[idx], diff[idx]))