0
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
#from itertools import product

#variables that will be used

# Accumulator lists shared by the word- and sentence-similarity passes below.
(database_word_synset,
 uploaded_sentence_synset,
 uploaded_sentence_words_tokenized,
 filtered_uploaded_sentences,
 database_sentence_words_tokenized,
 filtered_database_sentence,
 database_sentence_synset) = ([] for _ in range(7))

# Similarity-score lists (seeded with 0.0) and match/miss counters.
word_check = [0.0]
sentence_check = [0.0]
count_sentence = 0
count_word = 0
not_fond = 0  # (sic) running tally of pairs below the similarity threshold

#the given data

# The document to check, the reference word list, and the reference sentence.
uploaded_sentence=" The issue of text semantics, such as word semantics and sentence semantics has received increasing attentions in recent years. However, rare research focuses on the document-level semantic matching due to its complexity. Long documents usually have sophisticated structure and massive information, which causes hardship to measure their semantic similarity. The semantic similarity between words, sentences, texts, and documents is widely studied in various fields, including natural language processing, document semantic comparison, artificial intelligence, semantic web, and semantic search engines. "
database_word = ["car", "complete", "run", "sleep"]
database_sentence = "the earth is round not flat"

# NOTE(review): rebinding `stopwords` shadows the imported nltk corpus module;
# the name is kept for compatibility with the membership tests below.
# Converted to a set so each `word not in stopwords` check is O(1) instead of
# a linear scan of the ~180-entry stopword list on every token.
stopwords = set(stopwords.words('english'))
uploaded_sentence_words_tokenized = word_tokenize(uploaded_sentence)

#filtering the sentence and synset

# Drop stopwords from the uploaded document's tokens, then look up the
# WordNet synsets of every surviving token.
filtered_uploaded_sentences = [
    token for token in uploaded_sentence_words_tokenized
    if token not in stopwords
]
print(filtered_uploaded_sentences)

uploaded_sentence_synset = [wn.synsets(token) for token in filtered_uploaded_sentences]
print(uploaded_sentence_synset)

#for finding similarity in the words

# WordNet synsets for each database word (one list per word), followed by
# a flattened single-level list of all those synsets.
database_word_synset = [wn.synsets(entry) for entry in database_word]
print(database_word_synset)

words_list_synset = [synset for per_word in database_word_synset for synset in per_word]
print(words_list_synset)




#removing empty list element and making single dimension list

# Words with no WordNet entry produce empty synset lists; discard those,
# then flatten the remainder into a single list of synsets.
removing_empty_list_uploaded_sentence = [
    entry for entry in uploaded_sentence_synset if entry != []
]
up_list_sentence = [
    synset
    for entry in removing_empty_list_uploaded_sentence
    for synset in entry
]
print(up_list_sentence)

#the similarity main function for words

# Compare every database-word synset against every synset from the uploaded
# document.  A Wu-Palmer similarity of at least 0.70 counts as a match;
# wup_similarity returns None for incomparable synset pairs.
for data in words_list_synset:
    for sen in up_list_sentence:
        # wup_similarity walks the WordNet hypernym tree and is expensive;
        # the original called it twice per pair — compute it once instead.
        similarity = wn.wup_similarity(data, sen)
        if similarity is None or similarity < 0.70:
            not_fond = not_fond + 1
        else:
            count_word = count_word + 1


print(word_check)
print("\n words that are not found :", not_fond)
print("\n words that are found :", count_word)
#for finding similarity in the sentence

# Tokenize the reference sentence, drop stopwords, and collect the WordNet
# synsets of the remaining words (mirrors the uploaded-document pipeline).
database_sentence_words_tokenized = word_tokenize(database_sentence)

filtered_database_sentence = [
    token for token in database_sentence_words_tokenized
    if token not in stopwords
]
print(filtered_database_sentence)

database_sentence_synset = [wn.synsets(token) for token in filtered_database_sentence]
print(database_sentence_synset)

#removing empty list element and making single dimension list

# Discard words that had no WordNet entry, then flatten the per-word synset
# lists into one single-level list.
removing_empty_list_db = [entry for entry in database_sentence_synset if entry != []]

db_list_sentence = [synset for entry in removing_empty_list_db for synset in entry]
print(db_list_sentence)

#the similarity main function for sentence

# Score every database-sentence synset against every uploaded-sentence synset,
# then classify each score against the 0.70 threshold.  Note: sentence_check
# was seeded with [0.0] above, so that seed value is counted as "not found" —
# preserved to match the original tallies.  `not_fond` also carries over its
# count from the word pass above.
for db_sentence in db_list_sentence:
    for upl_sentence in up_list_sentence:
        sentence_check.append(wn.wup_similarity(db_sentence, upl_sentence))

for sentence_checks in sentence_check:
    if sentence_checks is None or sentence_checks < 0.70:
        not_fond += 1
    else:
        count_sentence += 1

print(sentence_check)
print("\n words that are not found :", not_fond)
print("\n words that are found :", count_sentence)

Installing libraries in the Android Studio build file:

In this project we use Chaquopy to run Python in our Android project, but it has some issues, such as importing libraries. I have also installed NLTK, wordnet, stopwords, and word tokenization separately, but I am not able to access these libraries from the Python file, and when we install our app, it crashes.

 if (! Python.isStarted()) {
           Python.start(new AndroidPlatform(this));
           Python py = Python.getInstance();
           final PyObject pyobj = py.getModule("sum");


           b2.setOnClickListener(new View.OnClickListener() {
               @Override
               public void onClick(View view) {
                   if (path==null) {
                       Toast.makeText(documentupload.this, " plz upload the doc", Toast.LENGTH_SHORT).show();
                       //upload.setText(path);


                       // Intent intent= new Intent(documentupload.this,result.class);
                       //startActivity(intent);
                   }
                   else {
                       PyObject obj = pyobj.callAttr("main", Words.toString());
                       upload.setText(obj.toString());
                       Toast.makeText(documentupload.this, "uploaded" + Words, Toast.LENGTH_LONG).show();
                      // Toast.makeText(documentupload.this, " plz upload the doc", Toast.LENGTH_LONG).show();
                   }
               }
           });

When the app crashes, it gives this error message:

mhsmith
  • 6,675
  • 3
  • 41
  • 58
Noman Omer
  • 53
  • 1
  • 4
  • Your Python code doesn't contain a function called `main`, so it's unclear what the connection between the Java and Python code is. Please edit your question to clarify this, and also include the full stack trace from the [Logcat](https://developer.android.com/studio/debug/am-logcat.html). – mhsmith Oct 02 '20 at 10:58
  • Actually cancel that, I've reproduced the problem and I'll post an answer in a few minutes. – mhsmith Oct 02 '20 at 11:01

1 Answers1

0

I assume the crash happened when calling wn.synsets? Here's the stack trace I saw:

  File "/data/user/0/com.chaquo.python.pkgtest3/files/chaquopy/AssetFinder/requirements/nltk/corpus/util.py", line 120, in __getattr__
  File "/data/user/0/com.chaquo.python.pkgtest3/files/chaquopy/AssetFinder/requirements/nltk/corpus/util.py", line 85, in __load
  File "/data/user/0/com.chaquo.python.pkgtest3/files/chaquopy/AssetFinder/requirements/nltk/corpus/util.py", line 80, in __load
  File "/data/user/0/com.chaquo.python.pkgtest3/files/chaquopy/AssetFinder/requirements/nltk/data.py", line 585, in find
LookupError: 
**********************************************************************
  Resource wordnet not found.

I don't think the "wordnet" and "corpus" pip packages have anything to do with nltk. Instead, you should install them using nltk.download, just as the error message says.

Because of an emulator bug, you may need to call nltk.download in a loop, as described in this answer.

mhsmith
  • 6,675
  • 3
  • 41
  • 58
  • i have download the nltk but i cannot import the nltk sub library like stopwords how to import nltk from nltk.corpus import stopwords print(stopwords.words('english')) how to make these thing working there are also other problems – Noman Omer Oct 07 '20 at 06:48
  • I've edited my answer to note that the "corpus" pip package doesn't have anything to do with nltk either, so that won't provide you with the stopwords feature. Instead, install it using `nltk.download` as I said. The error message will probably tell you which package name to use. – mhsmith Oct 07 '20 at 12:21
  • i have use this method but when we try to access the libraries from python script file it generates error and it cannot be accessed – Noman Omer Oct 09 '20 at 06:00
  • If you still need help, please create a new question and provide the full error, including the stack trace, along with the relevant sections of your code. – mhsmith Oct 09 '20 at 11:10