newmediamonitoring/monitor/views.py

import csv
import datetime
import json
from collections import Counter
from datetime import timedelta

import jieba
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import render

from dashboard.models import Weixin, Weixin_data, Toutiao_data, Weibo_data, Qita_jc, Group, Toutiao, Weibo, Qita, \
    Douyin, Douyin_data, News, TimelinessMonitoring, Organization, Wrongly
from monitor.models import Test

# Create your views here.


def new_media_public_opinion_weixin(request):
    weixin = Weixin.objects.all()
    group = Group.objects.all()
    weixin_data = Weixin_data.objects.all().order_by('-comment')
    res = []
    for w in weixin_data:
        o = dict()
        o['id'] = str(w.id)
        o['code'] = w.weixin.code
        o['image'] = w.weixin.image
        o['title'] = w.title
        o['comment'] = w.comment
        o['reply'] = w.reply
        o['year'] = w.year
        o['month'] = w.month
        o['day'] = w.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-weixin.html',
                  {'res': res, 'weixin': weixin, 'group': group})
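
# The new_media_public_opinion_* views below follow the same shape as the Weixin view
# above: load the platform's accounts and the monitoring groups, flatten each post's
# data into a dict, and hand everything to the matching template. Only the source
# model and the ordering field differ.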


def new_media_public_opinion_toutiao(request):
    toutiao = Toutiao.objects.all()
    group = Group.objects.all()
    toutiao_data = Toutiao_data.objects.all().order_by('-count')
    res = []
    for t in toutiao_data:
        o = dict()
        o['id'] = str(t.id)
        o['code'] = t.toutiao.code
        o['image'] = t.toutiao.image
        o['title'] = t.title
        o['count'] = t.count
        o['commentcount'] = t.commentcount
        o['reply'] = t.reply
        o['year'] = t.year
        o['month'] = t.month
        o['day'] = t.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-toutiao.html',
                  {'res': res, 'toutiao': toutiao, 'group': group})


def new_media_public_opinion_douyin(request):
    douyin = Douyin.objects.all()
    group = Group.objects.all()
    douyin_data = Douyin_data.objects.all().order_by('-comment')
    res = []
    for d in douyin_data:
        o = dict()
        o['id'] = str(d.id)
        o['code'] = d.newmedia.code
        o['image'] = d.newmedia.image
        o['count'] = d.count
        o['count_jc'] = d.count_jc
        o['comment'] = d.comment
        o['reply'] = d.reply
        o['date'] = d.date
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-douyin.html',
                  {'res': res, 'douyin': douyin, 'group': group})


def new_media_public_opinion_weibo(request):
    weibo = Weibo.objects.all()
    group = Group.objects.all()
    weibo_data = Weibo_data.objects.all().order_by('-like')
    res = []
    for w in weibo_data:
        o = dict()
        o['id'] = str(w.id)
        o['code'] = w.weibo.code
        o['image'] = w.weibo.image
        o['title'] = w.title
        o['like'] = w.like
        o['transpond'] = w.transpond
        o['comment'] = w.comment
        o['year'] = w.year
        o['month'] = w.month
        o['day'] = w.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-weibo.html', {'res': res, 'weibo': weibo, 'group': group})


def new_media_public_opinion_qita(request):
    qita = Qita.objects.all()
    group = Group.objects.all()
    qita_jc = Qita_jc.objects.all().order_by('-comment')
    res = []
    for q in qita_jc:
        o = dict()
        o['id'] = str(q.id)
        o['type'] = q.qita.type
        o['name'] = q.qita.name
        o['image'] = q.qita.image
        o['count'] = q.count
        o['count_jc'] = q.count_jc
        o['comment'] = q.comment
        o['reply'] = q.reply
        o['year'] = q.year
        o['month'] = q.month
        o['day'] = q.day
        res.append(o)
    return render(request, 'monitor/new-media-public-opinion-qita.html', {'res': res, 'qita': qita, 'group': group})


def timeliness_monitoring(request):
    now = datetime.datetime.now()
    # First and last day of this week
    this_week_start = now - timedelta(days=now.weekday())
    this_week_end = now + timedelta(days=6 - now.weekday())
    # First and last day of this month (guard against the December -> January rollover)
    this_month_start = datetime.datetime(now.year, now.month, 1)
    if now.month == 12:
        next_month_start = datetime.datetime(now.year + 1, 1, 1)
    else:
        next_month_start = datetime.datetime(now.year, now.month + 1, 1)
    this_month_end = next_month_start - timedelta(seconds=1)
    new_media_count = (Weixin.objects.count() + Weibo.objects.count() + Toutiao.objects.count()
                       + Douyin.objects.count() + Qita.objects.count())
    new_media_count_month = (Weixin.objects.filter(created__range=(this_month_start, this_month_end)).count()
                             + Weibo.objects.filter(created__range=(this_month_start, this_month_end)).count()
                             + Toutiao.objects.filter(created__range=(this_month_start, this_month_end)).count()
                             + Douyin.objects.filter(created__range=(this_month_start, this_month_end)).count()
                             + Qita.objects.filter(created__range=(this_month_start, this_month_end)).count())
    new_media_count_week = (Weixin.objects.filter(created__range=(this_week_start, this_week_end)).count()
                            + Weibo.objects.filter(created__range=(this_week_start, this_week_end)).count()
                            + Toutiao.objects.filter(created__range=(this_week_start, this_week_end)).count()
                            + Douyin.objects.filter(created__range=(this_week_start, this_week_end)).count()
                            + Qita.objects.filter(created__range=(this_week_start, this_week_end)).count())
    # update_count = TimelinessMonitoring.objects.all().count()
    update = TimelinessMonitoring.objects.all().aggregate(nums=Sum('update'))
    update_count = update['nums']
    comment = TimelinessMonitoring.objects.all().aggregate(nums=Sum('comment'))
    comment_count = comment['nums']
    wrongly_count = Wrongly.objects.all().count()
    sensitive_count = None
    organization_count = Organization.objects.all().count()
    return render(request, 'monitor/timeliness-monitoring.html',
                  {'new_media_count': new_media_count,
                   'new_media_count_month': new_media_count_month,
                   'new_media_count_week': new_media_count_week,
                   'update_count': update_count,
                   'comment_count': comment_count,
                   'wrongly_count': wrongly_count,
                   'organization_count': organization_count})
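

# A minimal sketch (not used above) of how the repeated per-platform counts in
# timeliness_monitoring could be factored out. It assumes, as that view does, that
# Weixin, Weibo, Toutiao, Douyin and Qita all expose a `created` timestamp; the
# helper name itself is illustrative and not part of the existing code.
def _count_new_media(start=None, end=None):
    models = (Weixin, Weibo, Toutiao, Douyin, Qita)
    if start is None or end is None:
        return sum(m.objects.count() for m in models)
    return sum(m.objects.filter(created__range=(start, end)).count() for m in models)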


def error_monitoring(request):
    return render(request, 'monitor/error-monitoring.html')


def sensitive_word_monitoring(request):
    return render(request, 'monitor/sensitive-word-monitoring.html')


def comment_on_interactive_monitoring(request):
    return render(request, 'monitor/comment-on-interactive-monitoring.html')


def comment_on_interactive_monitoring_json(request):
    data = Test.objects.all()[:500]
    r = []
    for d in data:
        content = d.content
        r.append(content)
    # result = jieba.analyse.textrank(content, topK=400, withWeight=True)
    seg_list = jieba.cut(str(r))  # segment the collected comment text
    c = Counter()
    for x in seg_list:  # count word frequencies
        if len(x) > 1 and x != '\r\n':
            c[x] += 1
    res = []
    for (k, v) in c.most_common(200):  # walk through the top high-frequency words
        # print('%s%s %s %d' % (' ' * (5 - len(k)), k, '*', v))
        # drop tokens that are not Chinese characters
        if all(map(lambda ch: '\u4e00' <= ch <= '\u9fa5', k)):
            o = dict()
            o['name'] = k
            o['value'] = v
            res.append(o)
    return HttpResponse(json.dumps({
        "res": res
    }))
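

# For reference, the payload built above serializes as
#   {"res": [{"name": "<word>", "value": <count>}, ...]}
# i.e. a word/weight list presumably consumed by the comment-on-interactive-monitoring
# page. An equivalent return (a sketch, not what the view above uses) would be
# django.http.JsonResponse({'res': res}).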


def monitoring_report(request):
    news = News.objects.filter(type='3').order_by('-date')
    count = News.objects.filter(type='3').count()
    return render(request, 'monitor/monitoring-report.html', {'news': news, 'count': count})


def test(request):
    return render(request, 'monitor/test.html')


def test_json(request):
    res = []
    with open('D:/2020/舆论监测平台/新媒体监测数据/平凉/Result_PL.csv', encoding='utf-8') as csvfile:
        reader = csv.reader(csvfile)
        results = []
        try:
            for r in reader:
                print(r[0])
                results.append(r[5])
        except Exception:  # stop on the first row that cannot be read
            print("777777777777777777777777777777777777777777777777")
    seg_list = jieba.cut(str(results))  # segment the collected text
    c = Counter()
    for x in seg_list:  # count word frequencies
        if len(x) > 1 and x != '\r\n':
            c[x] += 1
    for (k, v) in c.most_common(200):  # walk through the most frequent tokens
        if all(map(lambda ch: '\u4e00' <= ch <= '\u9fa5', k)):  # keep Chinese-only tokens
            o = dict()
            o['name'] = k
            o['value'] = v
            res.append(o)
    return HttpResponse(json.dumps({
        "res": res
    }))