Project for Python

Project for Python: Create an application containing the following:

- 2+ user-defined classes
- Data members per class, including at least one use of a list
- At least one use of a dictionary (the dictionary value should be a complex object, not just a simple value such as a number or string; see the short sketch after this list)
- 2+ manipulation methods per class
- Demonstrated use of if/if-else statements, while loops, and for loops, including use of range
- Exceptions (try/except)
- 1 or more nested loops (while and/or for)
- File I/O for reading and writing data
- Importing a library and using the imported library's objects

Due at 4:00pm, Dec 12, 2016. CSCI 1300, Fall 2016, University of Colorado Boulder.
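
For example, the dictionary requirement above could be met by mapping keys to instances of one of your own classes rather than to plain numbers or strings. A minimal, hypothetical sketch (the Review class and its fields are made up purely for illustration):

class Review:
    def __init__(self, product, rating, comments):
        self.product = product      # str
        self.rating = rating        # int
        self.comments = comments    # list of str

# dictionary whose values are complex objects (Review instances), not simple values
reviews_by_id = {
    "r1": Review("ipod", 5, ["great battery", "easy to use"]),
    "r2": Review("ipod", 3, ["screen scratches easily"]),
}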

Solution

I am giving one example program that covers the topics you have mentioned.
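
Before running it, the NLTK resources it relies on need to be downloaded once. A one-time setup along these lines should work (the package names assume a standard NLTK installation):

import nltk

# download the tokenizer models, taggers, chunkers, and corpora used below
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('product_reviews_2')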

import sys
import nltk
from nltk.tokenize import PunktSentenceTokenizer
from nltk.corpus import product_reviews_2
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from replacers import RegexpReplacer  # custom local module (replacers.py), not part of NLTK; provides regex-based text replacement
from nltk import pos_tag
from nltk.tree import Tree
from nltk import ne_chunk


# Raw review text from NLTK's product_reviews_2 corpus
text = product_reviews_2.raw('ipod.txt')

sent_tokenizer = PunktSentenceTokenizer()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
reg_tokenizer = RegexpTokenizer(r"[\w']+")        # words, keeping internal apostrophes
custom_reg_tokenizer = RegexpTokenizer(r"[##]")   # defined but not used below
replacer = RegexpReplacer()
stemmer = PorterStemmer()

class DataPreprocess:

    def __init__(self):
        pass

    def stopWordRemoval(self):
        # Tokenize the corpus into sentences and words, drop English stop
        # words, lemmatize what remains, and append the result to a file.
        try:
            stopWriteFile = open("stopwords_deleted.txt", "a")
            for sent in sent_tokenizer.tokenize(replacer.replace(text)):
                for word in reg_tokenizer.tokenize(sent):
                    if word.lower() not in stop_words:
                        stopWriteFile.write(lemmatizer.lemmatize(word.lower()) + " ")
                stopWriteFile.write(".\n")
        except Exception as ex:
            print(str(ex))
        finally:
            stopWriteFile.close()
  

    def pos_Tagging(self):
        freq_list = []
        try:
            stopReadFile = open("stopwords_deleted.txt", "r")
            posWriteFile = open("pos_tagged.txt", "a")
            nounWriteFile = open("cSet_1.txt", "a")
            nounSentWriteFile = open("nounSents.txt", "a")

            for review_sent in sent_tokenizer.tokenize(stopReadFile.read()):
                review_word = word_tokenize(review_sent)
                pos_tags = nltk.pos_tag(review_word)
                # Named-entity chunk the POS-tagged sentence
                tagged = ne_chunk(pos_tags, binary=False)

                # Collect the common nouns (NN) of this sentence
                for key, tag in pos_tags:
                    if tag == 'NN':
                        nounSentWriteFile.write(" " + key)
                nounSentWriteFile.write(".\n")

                posWriteFile.write(str(tagged) + "\n")

                # Chunk grammar that extracts single NN tokens as chunks
                chunkGrammar_NN = r"""NN: {<NN>}"""
                chunkParser_NN = nltk.RegexpParser(chunkGrammar_NN)
                tree_NN = chunkParser_NN.parse(pos_tags)
                for subtree in tree_NN.subtrees():
                    if subtree.label() == 'NN':
                        for word, label in subtree.leaves():
                            nounWriteFile.write(word + " ")  # candidate sets
                    else:
                        pass

        except Exception as ex:
            print(str(ex))

        finally:
            stopReadFile.close()
            posWriteFile.close()
            nounWriteFile.close()
            nounSentWriteFile.close()
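
To exercise the class, a minimal driver along these lines should work (it assumes replacers.py is on the import path and that the NLTK data listed above has been downloaded):

if __name__ == "__main__":
    preprocessor = DataPreprocess()
    preprocessor.stopWordRemoval()   # writes stopwords_deleted.txt
    preprocessor.pos_Tagging()       # writes pos_tagged.txt, cSet_1.txt, nounSents.txt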


