import dask.bag as db
import ujson as json
import pandas as pd
import numpy as np
import gzip
import re
b = db.read_text(r'F:/kaggle_dataset/亚马逊评论/reviews_Digital_Music_5.json.gz', encoding='utf-8').map(json.loads)
b.take(1)  # preview the first record
print(sum(1 for _ in gzip.open(r'F:/kaggle_dataset/亚马逊评论/reviews_Digital_Music_5.json.gz')))  # count how many records the file contains
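Since the data is already loaded as a dask bag, the record count can also be computed lazily through dask instead of re-reading the gzip file by hand; a minimal sketch, equivalent to the line above and reusing the bag b built earlier:

# count() builds a lazy task graph; compute() triggers the actual pass over the file
print(b.count().compute())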
tempDir = 'F:/kaggle_dataset/亚马逊评论/制作亚马逊用户评论词云'
stopwords=set(pd.read_csv('C:/Users/zhangshuai_lc/stopwords_en.txt',header=None)[0])
pattern = re.compile(r'\w+')  # regex used to tokenize reviews into words
def hashFile():
    temp_path_list = []
    for i in range(1, 101):
        # open 100 bucket files that the words will be partitioned into
        temp_path_list.append(open(tempDir + '/' + str(i) + '.txt', mode='w', encoding='utf-8'))
    for each in gzip.open(r'F:/kaggle_dataset/亚马逊评论/reviews_Digital_Music_5.json.gz'):
        sentence = json.loads(each)  # parse the JSON record into a dict (safer than eval)
        words = sentence['reviewText']
        words_list = pattern.findall(words)
        for word in words_list:
            if word.lower() not in stopwords and len(word) >= 2:
                word = word.lower()
                # hash the word into one of the 100 files; within a single run,
                # every occurrence of the same word lands in the same file
                temp_path_list[hash(word) % 100].write(word + '\n')
    for f in temp_path_list:
        f.close()

hashFile()
This step extracts the users' music reviews from the raw data, tokenizes each review with the regular expression, and hash-partitions the words into 100 text files. Because the target file is determined by the word's hash, every occurrence of a given word is guaranteed to end up in the same file, so each file can later be counted independently.
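One caveat: Python 3 salts str hashes per process (controlled by PYTHONHASHSEED), so hash(word) % 100 is stable within one run but not across runs. If the partitioning needs to be reproducible, a deterministic hash can be swapped in; a minimal sketch using zlib.crc32 (the helper name is illustrative):

import zlib

def stable_bucket(word, n_buckets=100):
    # crc32 is deterministic across runs and processes, unlike built-in hash()
    return zlib.crc32(word.encode('utf-8')) % n_buckets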
import os
from collections import Counter

results = Counter()
for root, dirs, files in os.walk(r'F:/kaggle_dataset/亚马逊评论/制作亚马逊用户评论词云'):
    for file in files:
        with open(os.path.join(root, file), encoding='utf-8') as f:
            words_list = [line.strip('\n') for line in f]
        word_common_1000 = Counter(words_list).most_common(1000)
        # update with a dict so results maps word -> count; updating with the
        # raw list of (word, count) tuples would count the tuples themselves
        results.update(dict(word_common_1000))
The 1000 most frequent words of each file go into results; because every word lives in exactly one file, these partial counts never overlap. A heap is then used to pull the 100 most frequent words out of results.
import heapq

# results maps word -> count, so take the 100 items with the largest counts
words_fren_list_100 = heapq.nlargest(100, results.items(), key=lambda x: x[1])
len(words_fren_list_100)
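With the top-100 (word, count) pairs in hand, the word cloud named by the output directory can be rendered from these frequencies; a minimal sketch assuming the third-party wordcloud package is installed (the output filename is illustrative):

from wordcloud import WordCloud

freqs = dict(words_fren_list_100)  # WordCloud expects a word -> frequency dict
wc = WordCloud(width=800, height=400, background_color='white')
wc.generate_from_frequencies(freqs)
wc.to_file('amazon_review_wordcloud.png')  # hypothetical output path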