You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
960 lines
43 KiB
960 lines
43 KiB
|
10 months ago
|
import datetime
|
||
|
|
import json
|
||
|
|
import logging
|
||
|
|
import os
|
||
|
|
import shutil
|
||
|
|
import time
|
||
|
|
import traceback
|
||
|
|
import uuid
|
||
|
|
from concurrent.futures import ThreadPoolExecutor
|
||
|
|
from urllib import parse
|
||
|
|
|
||
|
|
from django.db import transaction
|
||
|
|
from django.http import HttpResponse, FileResponse
|
||
|
|
from hashids import Hashids
|
||
|
|
from rest_framework.filters import OrderingFilter, SearchFilter
|
||
|
|
from rest_framework.generics import get_object_or_404
|
||
|
|
from rest_framework.permissions import IsAuthenticated
|
||
|
|
from rest_framework.views import APIView
|
||
|
|
from rest_framework.viewsets import ModelViewSet
|
||
|
|
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
|
||
|
|
|
||
|
|
from ChaCeRndTrans.basic import CCAIResponse
|
||
|
|
from ChaCeRndTrans.code import BAD, SERVER_ERROR
|
||
|
|
from ChaCeRndTrans.settings import ID_KEY, FILE_PATH, FILE_HTTP, MEDIA_ROOT, MAX_MP4_FILE_SIZE
|
||
|
|
from common.models import Archive, OperationHistoryLog
|
||
|
|
from common.serializers.archive_serializer import ArchiveSerializer
|
||
|
|
from utils.custom import CommonPagination, RbacPermission, AESEnDECryptRelated, req_operate_by_user, asyncDeleteFile, \
|
||
|
|
generate_random_str_for_fileName, generate_random_str, create_operation_history_log
|
||
|
|
|
||
|
|
from django.core.files.storage import default_storage
|
||
|
|
|
||
|
|
from rest_framework.decorators import action
|
||
|
|
|
||
|
|
from django.db import connection
|
||
|
|
|
||
|
|
from rbac.models import UserProfile
|
||
|
|
|
||
|
|
# Module-level logger for error reporting across all views in this file.
err_logger = logging.getLogger('error')

# Thread pool used to delete large files asynchronously (see ArchiveViewSet.destroy).
threadPool = ThreadPoolExecutor(max_workers=10, thread_name_prefix="test_")
|
class ArchiveViewSet(ModelViewSet):
    """
    CRUD management of user-uploaded archive files.

    (original docstring: 用户上传的资料管理)
    """
    # Permission mapping consumed by RbacPermission: each dict maps an HTTP
    # method name (or "*" for any method) to the required permission code.
    perms_map = ({"*": "admin"}, {"*": "comadmin"}, {"*": "archive_all"}, {"get": "archive_list"}, {"post": "archive_create"}, {"put": "archive_update"},
                 {"delete": "archive_delete"})
    queryset = Archive.objects.all()
    serializer_class = ArchiveSerializer
    pagination_class = CommonPagination
    filter_backends = (OrderingFilter, SearchFilter)
    ordering_fields = ("create_time",)
    # search_fields = ("Name", "ProjectCodes", )
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated, RbacPermission)
    def get_object(self):
        """
        Resolve the target object from the hashid ``pk`` URL kwarg.

        Overrides the default lookup: the public id is a Hashids-encoded
        string which is decoded back to the integer primary key before the
        queryset lookup.

        NOTE(review): on a bad/unknown id this *returns* a ``CCAIResponse``
        instead of raising — callers such as ``update``/``destroy`` will then
        treat the response object as a model instance and fail later inside
        their own broad ``except`` blocks. Confirm whether raising ``Http404``
        was the intended behaviour.

        @return: the matching ``Archive`` instance (or a ``CCAIResponse`` on
            failure — see note above)
        """
        pk = self.kwargs.get('pk')
        try:
            hashids = Hashids(salt=ID_KEY, min_length=32)
            decode_id = hashids.decode(pk)
            if not decode_id:
                err_logger.error("decode id failed, id: \n%s" % pk)
                return CCAIResponse("invalid ID", BAD)
            # Fetch the object by its real integer primary key
            obj = self.queryset.get(id=decode_id[0])
        except Exception as e:
            err_logger.error("get object failed, id: \n%s" % pk)
            return CCAIResponse("invalid ID", BAD)
        # Run DRF object-level permission checks before handing the object out
        self.check_object_permissions(self.request, obj)
        return obj
def list(self, request, *args, **kwargs):
|
||
|
|
"""
|
||
|
|
获取上传文件列表
|
||
|
|
:param request:
|
||
|
|
:param args:
|
||
|
|
:param kwargs:
|
||
|
|
:return:
|
||
|
|
"""
|
||
|
|
try:
|
||
|
|
hashids = Hashids(salt=ID_KEY, min_length=32)
|
||
|
|
params = request.GET
|
||
|
|
page_size = params.get('size', None)
|
||
|
|
page = params.get('page', None)
|
||
|
|
companyMid = params.get('companyMid', None)
|
||
|
|
ProjectCodes = params.get('ProjectCodes', None)
|
||
|
|
title = params.get('title', None)
|
||
|
|
Label = params.get('Label', None)
|
||
|
|
pagination = {}
|
||
|
|
adminUserId = UserProfile.objects.filter(roles__id=1).first() # 查询管理员用户id
|
||
|
|
|
||
|
|
sql = r'''
|
||
|
|
SELECT a.id, a.MainId, a.Name, a.Ext, a.ProjectCodes, a.Label, a.Url, a.Remark, a.Global,
|
||
|
|
a.Label, a.CreateBy, a.UpdateBy, a.CreateByUid, a.UpdateByUid,
|
||
|
|
a.CreateDateTime, a.UpdateDateTime, a.companyMid, a.title
|
||
|
|
FROM common_archive a
|
||
|
|
WHERE 1=1
|
||
|
|
'''
|
||
|
|
count_sql = r'''SELECT a.id FROM common_archive a WHERE 1=1 '''
|
||
|
|
where_params = []
|
||
|
|
|
||
|
|
if companyMid:
|
||
|
|
temp = " AND a.companyMid = '{}' ".format(companyMid)
|
||
|
|
sql = sql + temp
|
||
|
|
count_sql = count_sql + temp
|
||
|
|
# where_params.append(companyMid)
|
||
|
|
else:
|
||
|
|
return CCAIResponse("参数缺失!", BAD)
|
||
|
|
# 可能有多个项目
|
||
|
|
if ProjectCodes:
|
||
|
|
temp = ' AND ( '
|
||
|
|
project_codes_list = ProjectCodes.split(',')
|
||
|
|
for index, code in enumerate(project_codes_list):
|
||
|
|
if index == len(project_codes_list) - 1:
|
||
|
|
temp = temp + " FIND_IN_SET('{}', a.ProjectCodes) ".format(code)
|
||
|
|
else:
|
||
|
|
temp = temp + " FIND_IN_SET('{}', a.ProjectCodes) OR ".format(code)
|
||
|
|
temp = temp + ' ) '
|
||
|
|
sql = sql + temp
|
||
|
|
count_sql = count_sql + temp
|
||
|
|
# temp = " AND FIND_IN_SET('{}', a.ProjectCodes) ".format(ProjectCodes
|
||
|
|
# where_params.append(ProjectCodes)
|
||
|
|
|
||
|
|
if title:
|
||
|
|
temp = r''' AND a.title like %s '''
|
||
|
|
sql = sql + temp
|
||
|
|
count_sql = count_sql + temp
|
||
|
|
where_params.append('%' + title + '%')
|
||
|
|
|
||
|
|
if Label:
|
||
|
|
temp = r''' AND a.Label = '{}' '''.format(Label)
|
||
|
|
sql = sql + temp
|
||
|
|
count_sql = count_sql + temp
|
||
|
|
|
||
|
|
# 每页最多20条
|
||
|
|
if page_size:
|
||
|
|
if int(page_size) > 20:
|
||
|
|
err_logger.error("user: %s, page size over failed, size: \n%s" % (request.user.id, page_size))
|
||
|
|
page_size = 20
|
||
|
|
else:
|
||
|
|
page_size = 20
|
||
|
|
|
||
|
|
if page:
|
||
|
|
if int(page) > 2:
|
||
|
|
if request.user.id is None:
|
||
|
|
err_logger.error("user: %s, page over failed, size: \n%s" % (
|
||
|
|
request.user.id, page))
|
||
|
|
page = 1
|
||
|
|
page_size = 20
|
||
|
|
else:
|
||
|
|
page = 1
|
||
|
|
start_index = (int(page) - 1) * int(page_size)
|
||
|
|
# 管理员上传的文件,所有用户可以查看
|
||
|
|
# sql += " OR a.Global = {} ".format(1)
|
||
|
|
# count_sql += " OR a.Global = {} ".format(1)
|
||
|
|
# sql += " OR a.CreateByUid = {} ".format(adminUserId.id)
|
||
|
|
# count_sql += " OR a.CreateByUid = {} ".format(adminUserId.id)
|
||
|
|
count_sql = count_sql + ' ORDER BY {} desc'.format('a.id')
|
||
|
|
sql += ' ORDER BY {} desc LIMIT {}, {} '.format('a.CreateDateTime', start_index, page_size)
|
||
|
|
queryset = Archive.objects.raw(sql, where_params)
|
||
|
|
# 查看总数
|
||
|
|
count = 0
|
||
|
|
count_result = Archive.objects.raw(count_sql, where_params)
|
||
|
|
count = len(count_result)
|
||
|
|
# 返回分页结果
|
||
|
|
rows = []
|
||
|
|
for item in queryset:
|
||
|
|
item.__dict__.pop('_state')
|
||
|
|
item.__dict__['CreateDateTime'] = item.__dict__['CreateDateTime'].strftime('%Y-%m-%d %H:%M:%S')
|
||
|
|
item.__dict__['UpdateDateTime'] = item.__dict__['UpdateDateTime'].strftime('%Y-%m-%d %H:%M:%S')
|
||
|
|
item.__dict__['id'] = hashids.encode(item.__dict__['id'])
|
||
|
|
if item.__dict__['Url']:
|
||
|
|
media_folder = FILE_PATH.replace("\\", "/").split("/")[-1]
|
||
|
|
if media_folder == "media": # 开发时
|
||
|
|
item.__dict__['Url'] = FILE_HTTP + parse.quote(
|
||
|
|
item.__dict__['Url'].replace("upload", "media"))
|
||
|
|
else: # 线上
|
||
|
|
item.__dict__['Url'] = FILE_HTTP + parse.quote(item.__dict__['Url'])
|
||
|
|
rows.append(item.__dict__)
|
||
|
|
pagination['page'] = page
|
||
|
|
pagination['page_size'] = page_size
|
||
|
|
# encrypt_instance = AESEnDECryptRelated()
|
||
|
|
# new_ciphertext = encrypt_instance.start_encrypt(rows)
|
||
|
|
# print("e1:", new_ciphertext)
|
||
|
|
return CCAIResponse(data=rows, count=count, pagination=pagination)
|
||
|
|
|
||
|
|
except Exception as e:
|
||
|
|
err_logger.error("user: %s, get archive list failed: \n%s" % (
|
||
|
|
request.user.id, traceback.format_exc()))
|
||
|
|
return CCAIResponse("获取文件列表失败", status=500)
|
||
|
|
|
||
|
|
def retrieve(self, request, *args, **kwargs):
|
||
|
|
try:
|
||
|
|
hashids = Hashids(salt=ID_KEY, min_length=32)
|
||
|
|
archive_id = kwargs.get('pk')
|
||
|
|
archive_id = hashids.decode(archive_id)[0]
|
||
|
|
archive = get_object_or_404(self.queryset, pk=int(archive_id))
|
||
|
|
data = self.get_serializer(archive, many=False)
|
||
|
|
return CCAIResponse(data=data)
|
||
|
|
|
||
|
|
except Exception as e:
|
||
|
|
err_logger.error("user: %s, get archive detail failed: \n%s" % (
|
||
|
|
request.user.id, traceback.format_exc()))
|
||
|
|
return CCAIResponse("获取文件详情失败", status=500)
|
||
|
|
|
||
|
|
    def update(self, request, *args, **kwargs):
        """
        Update the metadata of an already uploaded file.

        Validates the unpacked payload (title / ProjectCodes / Label), forbids
        editing other users' files unless superuser, then delegates to the
        serializer and records an operation-history log entry.

        @param request: DRF request; the body is unpacked/normalised by
            ``req_operate_by_user``.
        @return: ``CCAIResponse`` "success", or an error response.
        """
        try:
            hashids = Hashids(salt=ID_KEY, min_length=32)
            data = req_operate_by_user(request=request)
            if not data['title']:
                return CCAIResponse("Missing title", BAD)
            # if not data['ProjectCodes']:
            #     return CCAIResponse("Missing ProjectCodes", BAD)
            # A file may be bound to several projects; store them comma-separated.
            if data['ProjectCodes'] and len(data['ProjectCodes']) > 0:
                data['ProjectCodes'] = ','.join(data['ProjectCodes'])
            else:
                data['ProjectCodes'] = None
            if not data['Label']:
                return CCAIResponse("Missing Label", BAD)
            # Only the owner (or a superuser) may edit a file.
            if not request.user.is_superuser and request.user.id != data['CreateByUid']:
                return CCAIResponse("不能修改他人文件", BAD)
            data['UpdateByUid'] = request.user.id
            data['UpdateDateTime'] = datetime.datetime.now()
            data['UpdateBy'] = request.user.name
            archive_id = kwargs.get('pk')
            archive_id = hashids.decode(archive_id)[0]
            data['id'] = archive_id
            # partial=True would update only the provided fields; defaults to
            # False, i.e. a full update of all serializer fields.
            partial = kwargs.pop('partial', False)
            # instance = self.get_object(archive_id)
            instance = self.get_object()
            serializer = self.get_serializer(instance, data=data, partial=partial)
            serializer.is_valid(raise_exception=True)
            self.perform_update(serializer)

            if getattr(instance, '_prefetched_objects_cache', None):
                # If 'prefetch_related' has been applied to a queryset, we need to
                # forcibly invalidate the prefetch cache on the instance.
                instance._prefetched_objects_cache = {}
            # Operation-history log entry.
            # NOTE(review): data["Name"] is never validated above — a payload
            # without "Name" raises KeyError here and the whole update is
            # reported as a 500. Confirm whether "title" was intended.
            info = {
                "des": "更新文档",
                "detail": "文档名称: " + data["Name"]
            }
            create_operation_history_log(request, info, OperationHistoryLog)

            return CCAIResponse(data="success")

        except Exception as e:
            err_logger.error("user: %s, update article failed: \n%s" % (
                request.user.id, traceback.format_exc()))
            return CCAIResponse("更新文章失败", status=500)
def create(self, request, *args, **kwargs):
|
||
|
|
try:
|
||
|
|
hashids = Hashids(salt=ID_KEY, min_length=32)
|
||
|
|
data = req_operate_by_user(request=request)
|
||
|
|
data = req_operate_by_user(request)
|
||
|
|
serializer = self.get_serializer(data=data)
|
||
|
|
serializer.is_valid(raise_exception=True)
|
||
|
|
self.perform_create(serializer)
|
||
|
|
# headers = self.get_success_headers(serializer.data)
|
||
|
|
# 操作记录
|
||
|
|
info = {
|
||
|
|
"des": "非法创建文章",
|
||
|
|
"detail": "文章名称: " + params["title"]
|
||
|
|
}
|
||
|
|
create_operation_history_log(request, info, OperationHistoryLog)
|
||
|
|
return CCAIResponse(data="success")
|
||
|
|
except Exception as e:
|
||
|
|
err_logger.error("user: %s, create archive failed: \n%s" % (
|
||
|
|
request.user.id, traceback.format_exc()))
|
||
|
|
return CCAIResponse("创建文章失败", status=500)
|
||
|
|
|
||
|
|
    def destroy(self, request, *args, **kwargs):
        """
        Delete an archive record and (asynchronously) its physical file.

        The DB row is removed inside a transaction; the on-disk file is then
        handed to the module thread pool for asynchronous deletion because
        large files can be slow to remove.

        @param request:
        @param args:
        @param kwargs: contains the hashid ``pk`` of the record to delete.
        @return: ``CCAIResponse`` describing success or failure.
        """
        try:
            hashids = Hashids(salt=ID_KEY, min_length=32)
            userid = request.user.id
            is_superuser = request.user.is_superuser
            archive_id = kwargs.get('pk')
            archive_id = hashids.decode(archive_id)[0]
            url_path = None
            with transaction.atomic():
                instance = self.get_object()
                url_path = instance.Url
                self.perform_destroy(instance)

            # With a modest number of files the encoded name can be treated
            # as unique (one code ↔ one file).
            # Resolve the on-disk location of the file.
            D_FILE_PATH = FILE_PATH.replace("\\", "/")
            upload_folder = D_FILE_PATH.split("/")[-1]
            if upload_folder == "media":  # development layout
                DEL_FILE_PATH = "/".join(D_FILE_PATH.split("/")[0:-1])  # drop "media" — the url contains "upload"
                url_path = url_path.replace("upload", "media")
            else:  # production layout
                DEL_FILE_PATH = D_FILE_PATH
                url_path = url_path

            # DEL_FILE_PATH + url_path is the absolute path of the file.
            if os.path.exists(DEL_FILE_PATH + url_path):
                # os.remove(url_path)  # inline deletion is slow for large files, so delete asynchronously
                try:
                    threadPool.submit(asyncDeleteFile, request, DEL_FILE_PATH + url_path)
                except Exception as e:
                    err_logger.error("user: %s, local delete archive failed: \n%s" % (
                        request.user.id, traceback.format_exc()))
                    return CCAIResponse("删除文件资源失败", status=200)
            else:
                err_logger.error("user: %s, want to delete archive dosen't exist, archive's name: \n%s" % (
                    request.user.id, instance.Name))

            info = {
                "des": "删除文章",
                "detail": "文件名称: " + instance.Name
            }
            create_operation_history_log(request, info, OperationHistoryLog)
            return CCAIResponse(msg="删除成功!", status=200)
        except Exception as e:
            err_logger.error("user: %s, delete article failed: \n%s" % (
                request.user.id, traceback.format_exc()))
            return CCAIResponse("删除用户上传文件资源失败", status=500)
    @action(methods=["get"], detail=False, permission_classes=[IsAuthenticated],
            url_path="getGlobal", url_name="getGlobal")
    def getGlobal(self, request, *args, **kwargs):
        """
        List the global files uploaded by administrators (``a.Global = 1``).

        Mirrors :meth:`list` but always filters on ``Global = 1`` and
        additionally returns publication metadata columns
        (issuingDepartment, dateOfPublication, originalLink, digest).

        :param request: DRF request; optional filters in the query string.
        :return: ``CCAIResponse`` with rows, total count and pagination info.
        """
        try:
            hashids = Hashids(salt=ID_KEY, min_length=32)
            params = request.GET
            page_size = params.get('size', None)
            page = params.get('page', None)
            # NOTE(review): companyMid / ProjectCodes / Label are read but
            # never used in this endpoint — confirm whether filtering on them
            # was intended.
            companyMid = params.get('companyMid', None)
            ProjectCodes = params.get('ProjectCodes', None)
            title = params.get('title', None)
            Label = params.get('Label', None)
            pagination = {}
            adminUserId = UserProfile.objects.filter(roles__id=1).first()  # admin user id (currently unused)

            sql = r'''
            SELECT a.id, a.MainId, a.Name, a.Ext, a.ProjectCodes, a.Label, a.Url, a.Remark, a.Global,
            a.Label, a.CreateBy, a.UpdateBy, a.CreateByUid, a.UpdateByUid,
            a.CreateDateTime, a.UpdateDateTime, a.companyMid,
            a.issuingDepartment, a.dateOfPublication, a.originalLink, a.digest, a.title
            FROM common_archive a
            WHERE 1=1
            '''
            count_sql = r'''SELECT a.id FROM common_archive a WHERE 1=1 '''
            where_params = []

            if title:
                temp = r''' AND a.title like %s '''
                sql = sql + temp
                count_sql = count_sql + temp
                where_params.append('%' + title + '%')

            # Cap the page size at 20 rows.
            if page_size:
                if int(page_size) > 20:
                    err_logger.error("user: %s, page size over failed, size: \n%s" % (request.user.id, page_size))
                    page_size = 20
            else:
                page_size = 20

            if page:
                if int(page) > 2:
                    # Anonymous users may not page past the first results.
                    if request.user.id is None:
                        err_logger.error("user: %s, page over failed, size: \n%s" % (
                            request.user.id, page))
                        page = 1
                        page_size = 20
            else:
                page = 1
            start_index = (int(page) - 1) * int(page_size)
            # Admin uploads are visible to every user.
            sql += " AND a.Global = {} ".format(1)
            count_sql += " AND a.Global = {} ".format(1)
            count_sql = count_sql + ' ORDER BY {} desc'.format('a.id')
            sql += ' ORDER BY {} desc LIMIT {}, {} '.format('a.CreateDateTime', start_index, page_size)
            queryset = Archive.objects.raw(sql, where_params)
            # Total row count for the pagination info.
            count = 0
            count_result = Archive.objects.raw(count_sql, where_params)
            count = len(count_result)
            # Serialise the raw rows into plain dicts for the response.
            rows = []
            for item in queryset:
                item.__dict__.pop('_state')
                item.__dict__['CreateDateTime'] = item.__dict__['CreateDateTime'].strftime('%Y-%m-%d %H:%M:%S')
                item.__dict__['UpdateDateTime'] = item.__dict__['UpdateDateTime'].strftime('%Y-%m-%d %H:%M:%S')
                if item.__dict__['dateOfPublication']:
                    item.__dict__['dateOfPublication'] = item.__dict__['dateOfPublication'].strftime('%Y-%m-%d')
                # Expose the hashid, never the raw integer primary key.
                item.__dict__['id'] = hashids.encode(item.__dict__['id'])
                if item.__dict__['Url']:
                    media_folder = FILE_PATH.replace("\\", "/").split("/")[-1]
                    if media_folder == "media":  # development layout
                        item.__dict__['Url'] = FILE_HTTP + parse.quote(
                            item.__dict__['Url'].replace("upload", "media"))
                    else:  # production layout
                        item.__dict__['Url'] = FILE_HTTP + parse.quote(item.__dict__['Url'])
                rows.append(item.__dict__)
            pagination['page'] = page
            pagination['page_size'] = page_size
            return CCAIResponse(data=rows, count=count, pagination=pagination)

        except Exception as e:
            err_logger.error("user: %s, get archive list failed: \n%s" % (
                request.user.id, traceback.format_exc()))
            return CCAIResponse("获取文件列表失败", status=500)
class BreakPointUploadAPIView(APIView):
    """
    Upload endpoint for audio / video / PDF files — supports both resumable
    ("BreakPoint" widget) and direct upload.

    (original docstring: 音频、视屏、PDF上传接口(断点续传,BreakPoint组件接口))
    """
    # Permission mapping consumed by RbacPermission.
    # Fix: the third entry was a *set* literal ({"*", "archive_all"}) instead
    # of a dict, so the archive_all permission could never be matched.
    perms_map = ({"*": "admin"}, {"*": "comadmin"}, {"*": "archive_all"}, {"post": "archive_create"}, {"put": "archive_update"})
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated, RbacPermission)
def post(self, request):
|
||
|
|
"""
|
||
|
|
备查文件 上传接口 包含: 断点续传,或者直传,两种方式
|
||
|
|
@param request:
|
||
|
|
@return:
|
||
|
|
"""
|
||
|
|
try:
|
||
|
|
params = request.data
|
||
|
|
hash_name = params.get("hashName", None)
|
||
|
|
hashids = Hashids(salt=ID_KEY, min_length=32)
|
||
|
|
type_ = params.get("type", 1) # 类型:1-文档 2-pdf 3-excel 4-图片 5-ppt 6-mp4
|
||
|
|
if type_ is None:
|
||
|
|
return CCAIResponse("Miss hashName", status=BAD)
|
||
|
|
type_map = {1: 'doc', 2: 'pdf', 3: 'excel', 4: 'pic', 5: 'ppt', 6: 'mp4'}
|
||
|
|
typeName = type_map.get(int(type_), 'doc') # 文件类型目录名
|
||
|
|
file_name = params.get("fileName", None)
|
||
|
|
ProjectCodes = params.get("ProjectCodes", None) # 绑定的项目ID,可能同时绑定多个项目
|
||
|
|
Remark = params.get("Remark", None) # 备注
|
||
|
|
companyMid = request.GET.get('companyMid', None) # 公司MainID
|
||
|
|
title = params.get('title', None)
|
||
|
|
last_dot_index = title.rfind('.')
|
||
|
|
if not title:
|
||
|
|
return CCAIResponse("missing fileName", status=BAD)
|
||
|
|
if last_dot_index != -1:
|
||
|
|
title = title[:last_dot_index]
|
||
|
|
else:
|
||
|
|
return title # 如果没有找到 '.',则返回原字符串
|
||
|
|
|
||
|
|
if companyMid is None:
|
||
|
|
return CCAIResponse("Miss companyMid", status=BAD)
|
||
|
|
if ProjectCodes and ProjectCodes !='' and len(ProjectCodes.split(',')) > 0:
|
||
|
|
ProjectCodes = ProjectCodes
|
||
|
|
else:
|
||
|
|
ProjectCodes = None
|
||
|
|
|
||
|
|
if file_name is None:
|
||
|
|
return CCAIResponse("Miss fileName", status=BAD)
|
||
|
|
# adminUser = UserProfile.objects.filter(roles__id=1).first() # 查询管理员用户
|
||
|
|
# Gobal 是否后台管理员上传全局查看资料 管理员为 1
|
||
|
|
Global = 2
|
||
|
|
if request.user.is_superuser:
|
||
|
|
Global = 1
|
||
|
|
|
||
|
|
# 将媒体文件统一放到upload下面
|
||
|
|
D_FILE_PATH = FILE_PATH.replace("\\", "/")
|
||
|
|
media_folder = D_FILE_PATH.split("/")[-1]
|
||
|
|
if media_folder != "media": # 线上存储路径
|
||
|
|
NEW_FILE_PATH = os.path.join(D_FILE_PATH, "upload")
|
||
|
|
if not os.path.exists(NEW_FILE_PATH):
|
||
|
|
os.makedirs(NEW_FILE_PATH)
|
||
|
|
else: # 本地开发时用到的路径
|
||
|
|
NEW_FILE_PATH = D_FILE_PATH
|
||
|
|
|
||
|
|
file_dir = os.path.join(NEW_FILE_PATH, "hash") # 存放hash文件的总目录
|
||
|
|
|
||
|
|
if hash_name:
|
||
|
|
# 断点续传
|
||
|
|
file_folder_hash, current_chunk = hash_name.split(
|
||
|
|
"_") # 每一个文件块的文件名,例如:313b0fe8c583ab4f6e1ef4747e501f9f_0.mp3
|
||
|
|
|
||
|
|
file_folder_hash = str(request.user.id) + file_folder_hash # hash文件夹名称,加id主要防止不同用户上传一模一样的文件
|
||
|
|
|
||
|
|
h_file_name = os.path.splitext(file_name)[0] # file_name.split(".")[0] # 文件名(不包含扩展)
|
||
|
|
ext = os.path.splitext(file_name)[1].replace(".", "") # file_name.split(".")[-1] # 扩展
|
||
|
|
hash_dir = os.path.join(file_dir, ext, h_file_name, file_folder_hash) # 用hash名命名的hash文件夹,存放hash文件块
|
||
|
|
if not os.path.exists(hash_dir):
|
||
|
|
os.makedirs(hash_dir)
|
||
|
|
|
||
|
|
files = os.listdir(hash_dir)
|
||
|
|
count = len(files)
|
||
|
|
if int(current_chunk) + 1 < count:
|
||
|
|
# 判断里面的文件是否连续,因为有些过期文件可能被定时任务删除了
|
||
|
|
file_index = []
|
||
|
|
for each_file in files:
|
||
|
|
index_str = each_file.split("_")[-1]
|
||
|
|
file_index.append(int(index_str.split(".")[0])) # 存放每个文件的下标
|
||
|
|
file_index.sort() # 将下标进行排序
|
||
|
|
compare_list = [] # 用来记录期望的文件下标
|
||
|
|
for each_num in range(len(file_index)):
|
||
|
|
compare_list.append(each_num)
|
||
|
|
# 开始判断期望的文件下标数组是否和已经存在的文件下标数组相等
|
||
|
|
if file_index == compare_list:
|
||
|
|
nextIndex = count
|
||
|
|
else:
|
||
|
|
shutil.rmtree(hash_dir) # 里面的文件不连续,先删除然后再新建一个一样的文件夹
|
||
|
|
os.makedirs(hash_dir)
|
||
|
|
nextIndex = int(current_chunk) + 1
|
||
|
|
file_chunk = request.FILES.get('file', '') # 获取每一个文件块
|
||
|
|
save_path = os.path.join(hash_dir, hash_name + "." + ext) # 文件块存储路径
|
||
|
|
file = open(save_path, "wb")
|
||
|
|
file.write(file_chunk.read())
|
||
|
|
file.close()
|
||
|
|
|
||
|
|
else:
|
||
|
|
nextIndex = int(current_chunk) + 1
|
||
|
|
file_chunk = request.FILES.get('file', '') # 获取每一个文件块
|
||
|
|
save_path = os.path.join(hash_dir, hash_name + "." + ext) # 文件块存储路径
|
||
|
|
file = open(save_path, "wb")
|
||
|
|
file.write(file_chunk.read())
|
||
|
|
file.close()
|
||
|
|
data = {
|
||
|
|
"code": 200,
|
||
|
|
"message": "成功",
|
||
|
|
"nextIndex": nextIndex
|
||
|
|
}
|
||
|
|
return HttpResponse(json.dumps(data), content_type="application/json")
|
||
|
|
|
||
|
|
else:
|
||
|
|
# 直传
|
||
|
|
file_obj_list = request.FILES.getlist('file')
|
||
|
|
if not file_obj_list:
|
||
|
|
return CCAIResponse("文件为空", BAD)
|
||
|
|
# tags = params.get("tags", None)
|
||
|
|
# roles = params['roles']
|
||
|
|
# public = params.get("public", 1)
|
||
|
|
# is_download = params.get("is_download", 1)
|
||
|
|
type_ = params.get("type", 1) # 类型:1-文档 2-pdf 3-excel 4-图片 5-ppt 6-mp4
|
||
|
|
type_map = {1: 'doc', 2: 'pdf', 3: 'excel', 4: 'pic', 5: 'ppt', 6: 'mp4'}
|
||
|
|
typeName = type_map.get(int(type_), 'doc') # 文件类型目录名
|
||
|
|
# importance = params.get("importance", 1)
|
||
|
|
|
||
|
|
err_data = {
|
||
|
|
"code": 500,
|
||
|
|
"message": "参数错误",
|
||
|
|
}
|
||
|
|
|
||
|
|
suc_data = {
|
||
|
|
"code": 200,
|
||
|
|
"message": "成功"
|
||
|
|
}
|
||
|
|
|
||
|
|
is_limit_size = (file_obj_list[0].size / 1024 / 1024) < MAX_MP4_FILE_SIZE # 不超过100M
|
||
|
|
if not is_limit_size:
|
||
|
|
data = {
|
||
|
|
"code": 500,
|
||
|
|
"message": "不允许上传超多100M的文件!",
|
||
|
|
}
|
||
|
|
return HttpResponse(json.dumps(data), content_type="application/json")
|
||
|
|
|
||
|
|
import time
|
||
|
|
ct = time.time() # 取得系统时间
|
||
|
|
local_time = time.localtime(ct) # 将系统时间转成结构化时间
|
||
|
|
date_head = time.strftime("%Y%m%d", local_time) # 格式化时间
|
||
|
|
date_m_secs = str(datetime.datetime.now().timestamp()).split(".")[-1] # 毫秒级时间戳
|
||
|
|
time_stamp = "%s%.3s" % (date_head, date_m_secs) # 拼接时间字符串
|
||
|
|
|
||
|
|
if not os.path.exists(os.path.join(NEW_FILE_PATH, typeName)):
|
||
|
|
os.makedirs(os.path.join(NEW_FILE_PATH, typeName))
|
||
|
|
save_dir = os.path.join(NEW_FILE_PATH, typeName, date_head)
|
||
|
|
# 如果不存在则创建目录
|
||
|
|
if not os.path.exists(save_dir):
|
||
|
|
os.makedirs(save_dir)
|
||
|
|
|
||
|
|
if file_name == "" or file_name is None:
|
||
|
|
return HttpResponse(json.dumps(err_data), content_type="application/json")
|
||
|
|
ext = os.path.splitext(file_name)[1].replace(".", "") # file_name.split(".")[-1] # 文件扩展名
|
||
|
|
title = "无标题"
|
||
|
|
# 打开文件,没有新增
|
||
|
|
for each in file_obj_list:
|
||
|
|
random_name = time_stamp + generate_random_str_for_fileName() + "." + ext
|
||
|
|
file_path = os.path.join(save_dir, random_name) # 文件存储路径
|
||
|
|
destination = open(file_path, 'wb')
|
||
|
|
for chunk in each.chunks():
|
||
|
|
destination.write(chunk)
|
||
|
|
destination.close()
|
||
|
|
title = os.path.splitext(each.name)[0] # "".join(each.name.split(".")[0:-1]) # 使用音频名字作为标题
|
||
|
|
url = os.path.join('/upload', typeName, date_head, random_name)
|
||
|
|
url = url.replace("\\", "/")
|
||
|
|
# 将数据写入数据库article表
|
||
|
|
Archive.objects.create(
|
||
|
|
MainId=uuid.uuid4().__str__(),
|
||
|
|
Name=title,
|
||
|
|
Ext=ext,
|
||
|
|
ProjectCodes=ProjectCodes,
|
||
|
|
Label=int(type_),
|
||
|
|
Url=url,
|
||
|
|
Remark=Remark,
|
||
|
|
Global=Global,
|
||
|
|
CreateBy=request.user.name,
|
||
|
|
UpdateBy=request.user.name,
|
||
|
|
CreateByUid=request.user.id,
|
||
|
|
UpdateByUid=request.user.id,
|
||
|
|
CreateDateTime=datetime.datetime.now(),
|
||
|
|
UpdateDateTime=datetime.datetime.now(),
|
||
|
|
companyMid=companyMid,
|
||
|
|
title=title
|
||
|
|
)
|
||
|
|
# 操作记录
|
||
|
|
if int(type_) == 1:
|
||
|
|
info = {
|
||
|
|
"des": "上传文档",
|
||
|
|
"detail": "文档名称: " + title
|
||
|
|
}
|
||
|
|
elif int(type_) == 2:
|
||
|
|
info = {
|
||
|
|
"des": "上传PDF文件",
|
||
|
|
"detail": "PDF文件名称: " + title
|
||
|
|
}
|
||
|
|
elif int(type_) == 3:
|
||
|
|
info = {
|
||
|
|
"des": "上传Excel文件",
|
||
|
|
"detail": "Excel文件名称: " + title
|
||
|
|
}
|
||
|
|
elif int(type_) == 4:
|
||
|
|
info = {
|
||
|
|
"des": "上传图片",
|
||
|
|
"detail": "图片名称: " + title
|
||
|
|
}
|
||
|
|
else:
|
||
|
|
info = {
|
||
|
|
"des": "上传PPT文件",
|
||
|
|
"detail": "PPT文件名称: " + title
|
||
|
|
}
|
||
|
|
create_operation_history_log(request, info, OperationHistoryLog)
|
||
|
|
|
||
|
|
return HttpResponse(json.dumps(suc_data), content_type="application/json")
|
||
|
|
|
||
|
|
except Exception as e:
|
||
|
|
err_logger.error(
|
||
|
|
"user: %s, break point upload file failed: \n%s" % (request.user.id, traceback.format_exc()))
|
||
|
|
return CCAIResponse("断点续传上传失败", status=500)
|
||
|
|
|
||
|
|
|
||
|
|
class MergeFile(APIView):
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    # Merges previously uploaded chunks into the final file
    # ("BreakPoint" resumable-upload widget, step 2).
    """
    合并文件(断点续传, BreakPoint组件接口)
    """
def get(self, request): # 合并文件
|
||
|
|
try:
|
||
|
|
params = request.GET
|
||
|
|
err_data = {
|
||
|
|
"code": 500,
|
||
|
|
"message": "参数错误",
|
||
|
|
"state": 0
|
||
|
|
}
|
||
|
|
|
||
|
|
hashids = Hashids(salt=ID_KEY, min_length=32)
|
||
|
|
|
||
|
|
hash_name = params.get("hashName", None)
|
||
|
|
file_name = params.get("fileName", None)
|
||
|
|
file_total_length = params.get("chunks", 0)
|
||
|
|
type_ = params.get("type", None)
|
||
|
|
type_map = {1: 'doc', 2: 'pdf', 3: 'excel', 4: 'pic', 5: 'ppt', 6: 'mp4'}
|
||
|
|
path_type = type_map.get(int(type_), 'doc') # 文件类型目录名
|
||
|
|
ProjectCodes = params.get("ProjectCodes", None)
|
||
|
|
Remark = params.get("Remark", None)
|
||
|
|
companyMid = params.get("companyMid", None)
|
||
|
|
title = params.get('title', None)
|
||
|
|
last_dot_index = title.rfind('.')
|
||
|
|
if not title:
|
||
|
|
return CCAIResponse("missing fileName", status=BAD)
|
||
|
|
if last_dot_index != -1:
|
||
|
|
title = title[:last_dot_index]
|
||
|
|
else:
|
||
|
|
return title # 如果没有找到 '.',则返回原字符串
|
||
|
|
|
||
|
|
if not file_name:
|
||
|
|
return CCAIResponse("missing fileName", status=BAD)
|
||
|
|
# if ProjectCodes is None:
|
||
|
|
# return CCAIResponse("Miss ProjectCodes", status=BAD)
|
||
|
|
if not companyMid:
|
||
|
|
return CCAIResponse("Miss companyMid", status=BAD)
|
||
|
|
# Gobal 是否后台管理员上传全局查看资料 管理员为 1
|
||
|
|
Global = 2
|
||
|
|
if request.user.is_superuser:
|
||
|
|
Global = 1
|
||
|
|
|
||
|
|
# 将媒体文件统一放到upload下面
|
||
|
|
D_FILE_PATH = FILE_PATH.replace("\\", "/")
|
||
|
|
media_folder = D_FILE_PATH.split("/")[-1]
|
||
|
|
if media_folder != "media": # 线上存储路径
|
||
|
|
NEW_FILE_PATH = os.path.join(D_FILE_PATH, "upload")
|
||
|
|
if not os.path.exists(NEW_FILE_PATH):
|
||
|
|
os.makedirs(NEW_FILE_PATH)
|
||
|
|
else: # 本地开发时用到的路径
|
||
|
|
NEW_FILE_PATH = D_FILE_PATH
|
||
|
|
|
||
|
|
file_dir = os.path.join(NEW_FILE_PATH, "hash") # 存放hash文件的总目录
|
||
|
|
|
||
|
|
h_file_name, ext = os.path.splitext(file_name) # file_name.split(".") # 文件名, 扩展
|
||
|
|
ext = ext.replace(".", "")
|
||
|
|
ext_name = "." + ext # 扩展名
|
||
|
|
file_dir_hash = os.path.join(file_dir, ext, h_file_name) # hash文件下与文件名同名字的文件夹
|
||
|
|
hash_name_new = str(request.user.id) + hash_name
|
||
|
|
hash_dir = os.path.join(file_dir_hash, hash_name_new) # 当前文件hash存储路径
|
||
|
|
|
||
|
|
import time
|
||
|
|
ct = time.time() # 取得系统时间
|
||
|
|
local_time = time.localtime(ct) # 将系统时间转成结构化时间
|
||
|
|
date_head = time.strftime("%Y%m%d", local_time) # 格式化时间
|
||
|
|
date_m_secs = str(datetime.datetime.now().timestamp()).split(".")[-1] # 毫秒级时间戳
|
||
|
|
time_stamp = "%s%.3s" % (date_head, date_m_secs) # 拼接时间字符串
|
||
|
|
|
||
|
|
if not os.path.exists(os.path.join(NEW_FILE_PATH, path_type)):
|
||
|
|
os.makedirs(os.path.join(NEW_FILE_PATH, path_type))
|
||
|
|
save_dir = os.path.join(NEW_FILE_PATH, path_type, date_head)
|
||
|
|
# 如果不存在则创建目录
|
||
|
|
if not os.path.exists(save_dir): # 合并后的文件存储目录
|
||
|
|
os.makedirs(save_dir)
|
||
|
|
|
||
|
|
random_name = time_stamp + generate_random_str() + ext_name
|
||
|
|
save_path = os.path.join(save_dir, random_name) # 合并后的文件存储路径
|
||
|
|
count = len(os.listdir(hash_dir))
|
||
|
|
if count != int(file_total_length): # 长度不相等,还未到达合并要求
|
||
|
|
return HttpResponse(json.dumps({"state": 0}), content_type="application/json")
|
||
|
|
try:
|
||
|
|
temp = open(save_path, 'wb') # 创建新文件
|
||
|
|
for i in range(0, count):
|
||
|
|
fp = open(hash_dir + "/" + hash_name + "_" + str(i) + ext_name, 'rb') # 以二进制读取分割文件
|
||
|
|
temp.write(fp.read()) # 写入读取数据
|
||
|
|
fp.close()
|
||
|
|
temp.close()
|
||
|
|
except Exception as e:
|
||
|
|
temp.close() # 在合并文件块失败的时候关闭临时合并的文件
|
||
|
|
os.remove(save_path) # 删除临时合并的文件
|
||
|
|
raise Exception
|
||
|
|
shutil.rmtree(hash_dir) # 删除
|
||
|
|
self.judge_folder_null(file_dir_hash)
|
||
|
|
|
||
|
|
title = h_file_name # 使用文件名字作为标题
|
||
|
|
url = os.path.join('/upload', path_type, date_head, random_name)
|
||
|
|
url = url.replace("\\", "/")
|
||
|
|
# 将数据写入数据库Archive表
|
||
|
|
Archive.objects.create(
|
||
|
|
MainId=uuid.uuid4().__str__(),
|
||
|
|
Name=title,
|
||
|
|
Ext=ext,
|
||
|
|
ProjectCodes=ProjectCodes,
|
||
|
|
Label=int(type_),
|
||
|
|
Url=url,
|
||
|
|
Remark=Remark,
|
||
|
|
Global=Global,
|
||
|
|
CreateBy=request.user.name,
|
||
|
|
UpdateBy=request.user.name,
|
||
|
|
CreateByUid=request.user.id,
|
||
|
|
UpdateByUid=request.user.id,
|
||
|
|
CreateDateTime=datetime.datetime.now(),
|
||
|
|
UpdateDateTime=datetime.datetime.now(),
|
||
|
|
companyMid=companyMid,
|
||
|
|
title=title
|
||
|
|
)
|
||
|
|
|
||
|
|
data = {
|
||
|
|
"code": 200,
|
||
|
|
"message": "成功",
|
||
|
|
"state": 1
|
||
|
|
}
|
||
|
|
# 操作记录
|
||
|
|
if int(type_) == 1:
|
||
|
|
info = {
|
||
|
|
"des": "上传文档",
|
||
|
|
"detail": "文档名称: " + title
|
||
|
|
}
|
||
|
|
elif int(type_) == 2:
|
||
|
|
info = {
|
||
|
|
"des": "上传PDF文件",
|
||
|
|
"detail": "PDF文件名称: " + title
|
||
|
|
}
|
||
|
|
elif int(type_) == 3:
|
||
|
|
info = {
|
||
|
|
"des": "上传Excel文件",
|
||
|
|
"detail": "Excel文件名称: " + title
|
||
|
|
}
|
||
|
|
elif int(type_) == 4:
|
||
|
|
info = {
|
||
|
|
"des": "上传图片",
|
||
|
|
"detail": "图片名称: " + title
|
||
|
|
}
|
||
|
|
else:
|
||
|
|
info = {
|
||
|
|
"des": "上传ppt文件",
|
||
|
|
"detail": "ppt文件名称: " + title
|
||
|
|
}
|
||
|
|
create_operation_history_log(request, info, OperationHistoryLog)
|
||
|
|
return HttpResponse(json.dumps(data), content_type="application/json")
|
||
|
|
|
||
|
|
except Exception as e:
|
||
|
|
err_logger.error(
|
||
|
|
"user: %s, break point merge file failed: \n%s" % (request.user.id, traceback.format_exc()))
|
||
|
|
return CCAIResponse("断点续传合并文件失败", status=500)
|
||
|
|
|
||
|
|
# 判断文件夹是否为空
|
||
|
|
def judge_folder_null(self, path):
    """Delete the directory at *path* if it contains no entries.

    Args:
        path: Filesystem path of the directory to inspect.

    Raises:
        FileNotFoundError: Propagated from ``os.listdir`` if *path*
            does not exist (unchanged from the original behavior).
    """
    # List every entry (files and sub-directories) under the path.
    entries = os.listdir(path)
    # Truthiness check instead of ``len(entries) == 0``.
    if not entries:
        shutil.rmtree(path)
|
||
|
|
|
||
|
|
|
||
|
|
class UploadImageAPIView(APIView):
    """Image-upload endpoint called by the rich-text (vue-quill) editor.

    Accepts a multipart POST with one or more files under the ``file``
    key, stores each under ``<FILE_PATH>[/upload]/images/<yyyymmdd>/``
    with a randomized name, records an operation-history log entry, and
    returns the public URL of the (last) stored image.
    """

    # Allowed image extensions, lower-case; the check is case-insensitive.
    ALLOWED_IMAGE_EXTS = (".png", ".jpg", ".jpeg", ".gif", ".jfif")

    perms_map = ({"*": "admin"}, {"post": "article_create"}, {"put": "article_update"})
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated, RbacPermission)

    def post(self, request):
        """Store the uploaded image(s) and return the resulting URL.

        Returns:
            CCAIResponse: the public URL on success; an error message
            with status 500 on size/format violations or failure.
        """
        try:
            file_obj_list = request.FILES.getlist("file")
            # Guard against an empty upload (originally raised IndexError).
            if not file_obj_list:
                return CCAIResponse("文件格式错误", status=500)

            # Enforce the 5M limit on EVERY file; the original only
            # checked the first one even though all files were written.
            for file_obj in file_obj_list:
                if not (file_obj.size / 1024 / 1024) < 5:
                    return CCAIResponse("文件不许超过5M", status=500)

            ct = time.time()                                # current system time
            local_time = time.localtime(ct)                 # as struct_time
            date_head = time.strftime("%Y%m%d", local_time)  # yyyymmdd
            # Millisecond fragment of the current timestamp.
            date_m_secs = str(datetime.datetime.now().timestamp()).split(".")[-1]
            # Date + first three millisecond digits → name prefix.
            time_stamp = "%s%.3s" % (date_head, date_m_secs)

            ext = "." + file_obj_list[0].name.split(".")[-1]
            # Case-insensitive extension check (".PNG" was previously rejected).
            if ext.lower() not in self.ALLOWED_IMAGE_EXTS:
                return CCAIResponse("文件格式错误", status=500)

            # Media files live under "upload" on the production layout.
            D_FILE_PATH = FILE_PATH.replace("\\", "/")
            media_folder = D_FILE_PATH.split("/")[-1]
            if media_folder != "media":  # production storage path
                NEW_FILE_PATH = os.path.join(D_FILE_PATH, "upload")
            else:  # local development path
                NEW_FILE_PATH = D_FILE_PATH
            save_dir = os.path.join(NEW_FILE_PATH, "images", date_head)
            # Create the whole tree in one call; no-op if it exists.
            os.makedirs(save_dir, exist_ok=True)

            for each in file_obj_list:
                random_name = time_stamp + generate_random_str() + ext
                file_path = os.path.join(save_dir, random_name)
                # Context manager guarantees the handle is closed even if
                # a chunk write raises (the original leaked it on error).
                with open(file_path, 'wb') as destination:
                    for chunk in each.chunks():
                        destination.write(chunk)

            # NOTE: as in the original, only the LAST file's URL is returned.
            url = os.path.join('/upload', "images", date_head, random_name)
            url = url.replace("\\", "/")

            # Operation-history record.
            info = {
                "des": "富文本图片上传",
                "detail": "图片地址: " + url
            }
            create_operation_history_log(request, info, OperationHistoryLog)

            if media_folder == "media":  # local layout serves from /media
                url = url.replace("upload", "media")
            url = FILE_HTTP + url
            return CCAIResponse(url, status=200)

        except Exception:
            err_logger.error("user: %s, vue-quill-editor upload image failed: \n%s" % (
                request.user.id, traceback.format_exc()
            ))
            return CCAIResponse("上传失败", status=500)
|
||
|
|
|
||
|
|
|
||
|
|
class PreviewWordAPIView(APIView):
    """Serve a Word document for preview/download.

    NOTE(review): the source path below is a hard-coded developer
    machine path — this endpoint looks like a debug stub and should be
    parameterized before production use.
    """

    # HACK: hard-coded local path left over from development.
    DOC_PATH = r"C:\Users\Ykim H\Desktop\渡渡鸟知识库用户操作手册.doc"
    DOC_NAME = "渡渡鸟知识库用户操作手册.doc"

    # perms_map = ({"*": "admin"}, {"get": "get_preview"})
    # authentication_classes = (JSONWebTokenAuthentication,)
    # permission_classes = (IsAuthenticated, RbacPermission)

    def get(self, request):
        """Stream the manual as an attachment, or report failure."""
        try:
            # FileResponse takes ownership of the handle and closes it
            # after streaming, so deliberately no ``with`` block here.
            file = open(self.DOC_PATH, 'rb')
            response = FileResponse(file)
            # ".doc" is the legacy binary format -> application/msword.
            # (The previous value was the ".docx" OOXML MIME type.)
            response['Content-Type'] = 'application/msword'
            # Percent-encode the non-ASCII filename per RFC 6266/5987 so
            # browsers decode it correctly; raw UTF-8 in a header is invalid.
            response['Content-Disposition'] = "attachment;filename*=UTF-8''%s" % parse.quote(self.DOC_NAME)
            return response
        except Exception:
            err_logger.error("user: %s, get doc file failed: \n%s" % (request.user.id, traceback.format_exc()))
            return CCAIResponse("下载渡渡鸟知识库用户操作手册.doc失败", SERVER_ERROR)
|
||
|
|
|
||
|
|
|
||
|
|
class CheckResourceAPIView(APIView):
    """Anti-hook probe endpoint: confirms the resource is reachable."""

    # perms_map = ({"*": "admin"}, {"get": "get_preview"})
    # authentication_classes = (JSONWebTokenAuthentication,)
    # permission_classes = (IsAuthenticated, RbacPermission)

    def get(self, request):
        """Respond with a success marker; log and report any failure."""
        try:
            print("来了")
            return CCAIResponse("success")
        except Exception:
            err_logger.error("user: %s, check resource failed: \n%s"
                             % (request.user.id, traceback.format_exc()))
            return CCAIResponse("资源检测失败", SERVER_ERROR)
|