# newmediamonitoring/monitor/views.py
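"""Views for the monitor app: per-platform public-opinion listings (Weixin, Toutiao,
Douyin, Weibo and other platforms), monitoring dashboards, and JSON endpoints that
build word-frequency data with jieba."""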


import csv
import datetime
import json
from collections import Counter
import jieba
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import render
from datetime import timedelta
# Create your views here.
from dashboard.models import Weixin, Weixin_data, Toutiao_data, Weibo_data, Qita_jc, Group, Toutiao, Weibo, Qita, \
    Douyin, Douyin_data, News, TimelinessMonitoring, Organization, Wrongly
from monitor.models import Test


def new_media_public_opinion_weixin(request):
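    """Render the WeChat (Weixin) public-opinion page: posts ordered by comment count, descending."""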
    weixin = Weixin.objects.all()
    group = Group.objects.all()
    weixin_data = Weixin_data.objects.all().order_by('-comment')
    res = []
    for w in weixin_data:
        o = dict()
        o['id'] = str(w.id)
        o['code'] = w.weixin.code
        o['image'] = w.weixin.image
        o['title'] = w.title
        o['comment'] = w.comment
        o['reply'] = w.reply
        o['year'] = w.year
        o['month'] = w.month
        o['day'] = w.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-weixin.html',
                  {'res': res, 'weixin': weixin, 'group': group})


def new_media_public_opinion_toutiao(request):
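    """Render the Toutiao public-opinion page: posts ordered by their count field, descending."""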
    toutiao = Toutiao.objects.all()
    group = Group.objects.all()
    toutiao_data = Toutiao_data.objects.all().order_by('-count')
    res = []
    for t in toutiao_data:
        o = dict()
        o['id'] = str(t.id)
        o['code'] = t.toutiao.code
        o['image'] = t.toutiao.image
        o['title'] = t.title
        o['count'] = t.count
        o['commentcount'] = t.commentcount
        o['reply'] = t.reply
        o['year'] = t.year
        o['month'] = t.month
        o['day'] = t.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-toutiao.html',
                  {'res': res, 'toutiao': toutiao, 'group': group})


def new_media_public_opinion_douyin(request):
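    """Render the Douyin public-opinion page: posts ordered by comment count, descending."""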
    douyin = Douyin.objects.all()
    group = Group.objects.all()
    douyin_data = Douyin_data.objects.all().order_by('-comment')
    res = []
    for d in douyin_data:
        o = dict()
        o['id'] = str(d.id)
        o['code'] = d.newmedia.code
        o['image'] = d.newmedia.image
        o['count'] = d.count
        o['count_jc'] = d.count_jc
        o['comment'] = d.comment
        o['reply'] = d.reply
        o['date'] = d.date
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-douyin.html',
                  {'res': res, 'douyin': douyin, 'group': group})


def new_media_public_opinion_weibo(request):
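    """Render the Weibo public-opinion page: posts ordered by like count, descending."""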
    weibo = Weibo.objects.all()
    group = Group.objects.all()
    weibo_data = Weibo_data.objects.all().order_by('-like')
    res = []
    for w in weibo_data:
        o = dict()
        o['id'] = str(w.id)
        o['code'] = w.weibo.code
        o['image'] = w.weibo.image
        o['title'] = w.title
        o['like'] = w.like
        o['transpond'] = w.transpond
        o['comment'] = w.comment
        o['year'] = w.year
        o['month'] = w.month
        o['day'] = w.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-weibo.html',
                  {'res': res, 'weibo': weibo, 'group': group})


def new_media_public_opinion_qita(request):
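    """Render the public-opinion page for other (qita) platforms: posts ordered by comment count, descending."""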
    qita = Qita.objects.all()
    group = Group.objects.all()
    qita_jc = Qita_jc.objects.all().order_by('-comment')
    res = []
    for q in qita_jc:
        o = dict()
        o['id'] = str(q.id)
        o['type'] = q.qita.type
        o['name'] = q.qita.name
        o['image'] = q.qita.image
        o['count'] = q.count
        o['count_jc'] = q.count_jc
        o['comment'] = q.comment
        o['reply'] = q.reply
        o['year'] = q.year
        o['month'] = q.month
        o['day'] = q.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-qita.html',
                  {'res': res, 'qita': qita, 'group': group})


def timeliness_monitoring(request):
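    """Render the timeliness dashboard: new-media account totals (overall, this month,
    this week) plus summed update/comment figures and Wrongly/Organization counts."""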
    now = datetime.datetime.now()
    # First and last day of the current week
    this_week_start = now - timedelta(days=now.weekday())
    this_week_end = now + timedelta(days=6 - now.weekday())
    # First and last moment of the current month (handle the December -> January rollover)
    this_month_start = datetime.datetime(now.year, now.month, 1)
    if now.month == 12:
        next_month_start = datetime.datetime(now.year + 1, 1, 1)
    else:
        next_month_start = datetime.datetime(now.year, now.month + 1, 1)
    this_month_end = next_month_start - timedelta(seconds=1)
    new_media_count = (Weixin.objects.count() + Weibo.objects.count() + Toutiao.objects.count()
                       + Douyin.objects.count() + Qita.objects.count())
    new_media_count_month = sum(
        model.objects.filter(created__range=(this_month_start, this_month_end)).count()
        for model in (Weixin, Weibo, Toutiao, Douyin, Qita))
    new_media_count_week = sum(
        model.objects.filter(created__range=(this_week_start, this_week_end)).count()
        for model in (Weixin, Weibo, Toutiao, Douyin, Qita))
    update_count = TimelinessMonitoring.objects.aggregate(nums=Sum('update'))['nums']
    comment_count = TimelinessMonitoring.objects.aggregate(nums=Sum('comment'))['nums']
    wrongly_count = Wrongly.objects.count()  # was missing the call parentheses
    sensitive_count = None  # placeholder; not yet passed to the template
    organization_count = Organization.objects.count()
    return render(request, 'monitor/timeliness-monitoring.html',
                  {'new_media_count': new_media_count,
                   'new_media_count_month': new_media_count_month,
                   'new_media_count_week': new_media_count_week,
                   'update_count': update_count,
                   'comment_count': comment_count,
                   'wrongly_count': wrongly_count,
                   'organization_count': organization_count})


def error_monitoring(request):
    return render(request, 'monitor/error-monitoring.html')


def sensitive_word_monitoring(request):
    return render(request, 'monitor/sensitive-word-monitoring.html')


def comment_on_interactive_monitoring(request):
    return render(request, 'monitor/comment-on-interactive-monitoring.html')


def comment_on_interactive_monitoring_json(request):
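    """Return the 200 most frequent Chinese words from up to 500 Test comments as JSON name/value pairs."""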
    data = Test.objects.all()[:500]
    r = []
    for d in data:
        r.append(d.content)
    # Segment the joined comment text (instead of the list's repr); skip empty values
    seg_list = jieba.cut(' '.join(part for part in r if part))
    c = Counter()
    for x in seg_list:  # count word frequencies
        if len(x) > 1 and x != '\r\n':
            c[x] += 1
    res = []
    for (k, v) in c.most_common(200):  # keep the 200 most frequent terms
        # drop tokens that are not purely Chinese characters
        if all('\u4e00' <= ch <= '\u9fa5' for ch in k):
            o = dict()
            o['name'] = k
            o['value'] = v
            res.append(o)
    return HttpResponse(json.dumps({
        "res": res
    }))


def monitoring_report(request):
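    """Render the monitoring-report page with type-3 News entries, newest first, and their count."""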
    news = News.objects.filter(type='3').order_by('-date')
    count = News.objects.filter(type='3').count()
    return render(request, 'monitor/monitoring-report.html', {'news': news, 'count': count})


def test(request):
    return render(request, 'monitor/test.html')


def test_json(request):
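    """Test endpoint: build word-frequency JSON name/value pairs from a local CSV export."""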
    res = []
    with open('D:/2020/舆论监测平台/新媒体监测数据/平凉/Result_PL.csv', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)
        results = []
        try:
            for r in reader:
                results.append(r[5])
        except (csv.Error, IndexError, UnicodeDecodeError):
            # skip unreadable rows; keep whatever was collected so far
            pass
    seg_list = jieba.cut(' '.join(results))  # segment the collected column text
    c = Counter()
    for x in seg_list:  # count word frequencies
        if len(x) > 1 and x != '\r\n':
            c[x] += 1
    for (k, v) in c.most_common(200):  # keep the 200 most frequent terms
        # drop tokens that are not purely Chinese characters
        if all('\u4e00' <= ch <= '\u9fa5' for ch in k):
            o = dict()
            o['name'] = k
            o['value'] = v
            res.append(o)
    return HttpResponse(json.dumps({
        "res": res
    }))