1.02版本
最初的代码是Java版本的,现在用Python重写一遍
代码的功能
代码和本地代码的区别
有什么用
实际使用的案例
不能解决的问题
没有上传到github和gitee的原因
效果图
源代码
# %任务,直接转变成 使用dataframe来操作
# % 然后,。。啥,就完成上面这个就行 完成file操作的async
# 打印检索结果的开头和title,条件,限制条件
# 然后提供一个api,自己curl测试一下。
# 然后用docker打包一下。把文件内容传送过去,然后返回处理的结果。
# 然后做一个页面,把内容填写进去,然后点击按钮,另外一边显示出来。
# 做一个备忘录,或者白板功能
# 做一个小程序界面
# 做一个搜索功能,把关键字写进去,然后能够查询出结果
# java也重新写一写
# 3大功能
# 搜索文件夹和文件名称
# (vscode是comman + P)
# 妙:可以选择多个文件夹,添加子文件夹排除条件
# 搜索文件内容
# (vscode command + shift + F)
# 妙: 可以检索多个正则表达式,然后先匹配上的作为变量向下复制,同一行,或者同文件,
# 但是因为没有结束匹配。所以开头和结尾的地方会出现数据不匹配。如果能添加一个结束匹配的正则表达式。那么默认是文件内作为变量
# 提取表格中的列的内容,然后命中分类
# 妙:同一行抽取多个特征,一个正则表达式提取多个token
# 妙:多个正则表达式的命中,同时分类
# 同一行多个命中,分成多行。
# 配置常量
import datetime
from datetime import date
from operator import concat
import os
import asyncio
from asyncio import Lock
import pandas as pd
import numpy as np
# from traceback import print_list
# import tornado
# from threading import Thread
import re
# from typing import Concatenate
def getChildFiles(basePath):
    """Return the names of the plain files directly inside *basePath*.

    Uses os.path.join so the path works with or without a trailing
    separator (the original `basePath + f` required the caller to
    append one).
    """
    return [f for f in os.listdir(basePath) if os.path.isfile(os.path.join(basePath, f))]
def getChildFolders(basePath):
    """Return the names of the sub-directories directly inside *basePath*.

    Uses os.path.join so the path works with or without a trailing
    separator (the original `basePath + f` required the caller to
    append one).
    """
    return [f for f in os.listdir(basePath) if os.path.isdir(os.path.join(basePath, f))]
# Global flag: True until the first Excel write happens.  The first writer
# creates the workbook with the header; later writers append rows
# (see writeToFile).
isFirstExcelOutput = True
# macOS note: once a folder has been accessed, the system remembers the
# allow/deny choice; "removable volumes" is allowed for VS Code,
# "network volumes" is currently not ticked.
# How to use `async def`:
# https://superfastpython.com/asyncio-async-def/
# https://docs.python.org/3/library/index.html
# Regex examples with Chinese text: https://www.jb51.net/article/177521.htm
# https://blog.csdn.net/weixin_40907382/article/details/79654372
# Official regex docs: https://docs.python.org/3/library/re.html
async def writeToFile(filout, finalStrArr, lock: Lock, oneFileData: pd.DataFrame):
    """Append one file's search results to the text output and the Excel file.

    Serialized via *lock* so concurrent search tasks cannot interleave
    their writes.  The first Excel write creates the workbook with a
    two-level (title, keyword) column header; subsequent writes append
    below the sheet's current last row.

    Bug fix: the first header tuple of the MultiIndex used t_hitNos for
    the line-number column; it now uses t_lineNo, matching the reference
    layout kept in the comment below.

    NOTE(review): relies on the module-level globals isFirstExcelOutput,
    searchKwsArr and the t_* title constants.
    """
    async with lock:
        # NOTE: to_string() would pad columns with runs of spaces, so
        # tab-separated CSV text is written instead.  No header row here:
        # main() already wrote the header lines to the text file.
        oneFileData.to_csv(filout, sep='\t', index=False, header=None)
        # Excel has no notion of "append at end of file"; we use
        # pd.ExcelWriter(mode='a') and start writing at the sheet's
        # current max_row.
        file_path = '您的输出文件/output/test01.xlsx'
        global isFirstExcelOutput
        oneFileData = oneFileData.fillna(" ")
        if isFirstExcelOutput:
            oneFileData.index.name = "No"
            oneFileData.columns.name = "No2"  # NOTE(review): appears not to be displayed
            oneFileData.index = oneFileData.index + 1
            # Build the (title, keyword) MultiIndex header:
            # lineNo, result1..resultN (one per search regex), hitNos,
            # hitKws, lineContent.
            multHd = []
            multHd.append((t_lineNo, ""))  # was (t_hitNos, "") -- wrong column title
            resultNoCnt = 1
            for kw in searchKwsArr:
                multHd.append((t_result_tmp + str(resultNoCnt), kw))
                resultNoCnt += 1
            multHd.append((t_hitNos, ""))
            multHd.append((t_hitKws, ""))
            multHd.append((t_lineContent, ""))
            oneFileData.columns = pd.MultiIndex.from_tuples(multHd, names=["titles", "keywords"])
            # Reference layout:
            # [("lineNo",""),("result1",kw1),...,("hitNos",""),("hitKws",""),("lineContent","")]
            oneFileData.to_excel(file_path)
            isFirstExcelOutput = False
        else:
            with pd.ExcelWriter(file_path, mode='a', if_sheet_exists='overlay') as writer:
                # Shift the index so row numbering continues from the
                # sheet's current last row.
                oneFileData.index = oneFileData.index + writer.sheets['Sheet1'].max_row
                oneFileData.to_excel(writer, sheet_name='Sheet1', startrow=writer.sheets['Sheet1'].max_row, header=None)
def multiMatch(content, kwsArr):
    """Return True if *content* matches (from its start) any regex in *kwsArr*."""
    return any(re.match(pattern, content) for pattern in kwsArr)
# File-name patterns that are EXCLUDED from searching:
# macOS "._*" AppleDouble metadata files and Excel files (our own output).
excFileType = [
    r"^\._.*",
    r".*\.xls.*"
]
# File-name patterns that are included: "name.ext" with no leading dot.
# NOTE(review): defined but not referenced by the visible search code,
# only echoed into the output header by main().
incFileType = [
    r"^[^\.]+\.[^\.]+"
]
# The regexes searched for in every line of every file
# (rhetorical patterns in Chinese text, e.g. "在...里", "忽然...").
searchKwsArr = [
    r"(在|到)[^,。]+里",
    r"忽然[^,。]+",
    r"[^,。]+一般",
    r"像[^,。]+",
    r"是[^,。]+"
]
# Column-title constants shared by searchInFile and writeToFile.
t_lineNo="lineNo"
t_result_tmp="result"
t_hitNos="hitNos"
t_hitKws="hitKws"
t_lineContent="lineContent"
async def searchInFile(f, basePath, filout, lock: Lock):
    """Search one file line by line against every regex in searchKwsArr.

    For each line, every regex is run with finditer; the i-th hit of a
    regex is placed on row i of a per-line DataFrame, so a line with
    several hits for the same regex expands into several output rows.
    The accumulated per-file DataFrame is handed to writeToFile, which
    serializes output with *lock*.
    """
    print("filename: " + f)
    # if not re.match(r"^\._.*", f) and not re.match(r".*\.xls.*", f):
    if not multiMatch(f,excFileType):
        # if not re.match(r"^\.", f):
        # Column titles: lineNo, result1..resultN, hitNos, hitKws, lineContent.
        col_title=[t_lineNo]
        resultNoCnt = 1
        for kw in searchKwsArr:
            col_title.append(t_result_tmp+str(resultNoCnt))
            resultNoCnt+=1
        # col_title.extend([t_hitNos,t_hitKws,t_lineContent])
        col_title.append(t_hitNos)
        col_title.append(t_hitKws)
        col_title.append(t_lineContent)
        with open(basePath + f, "r") as file:
            one_file_result = pd.DataFrame(columns=
            col_title)
            finalStrArr = []
            # ["lineNo","result1","result2","result3","result4","result5","hitNos","hitKws","lineContent"])
            # one_file_result =
            # NOTE: DataFrame.append was removed in recent pandas; one
            # suggestion is to pin pandas==1.3.4, but that drags many
            # other libraries along, so it is not recommended -- the
            # common advice is to use pd.concat instead, as done below.
            linNo = 0
            lines = file.readlines()
            for line in lines:
                linNo += 1
                # ptStrs[i] is the tab-separated text for output row i of
                # this line; resultPD_key holds the same rows as a DataFrame.
                ptStrs = list()
                resultPD_key = pd.DataFrame(columns=col_title)
                ptStrTmp = str(linNo) + "\t"
                resultPD_tmp = pd.DataFrame(columns=col_title)
                resultPD_tmp.loc[0,t_lineNo]=linNo
                maxFnd = 0
                hitKws = []
                hitNos = []
                kwsSeq = 0
                # for pp in [r"https://hXXXXXXXXXXXXXXXXXXl/[0-9]+\.html"]:
                # for pp in [r"(在|到).+里", r"忽然[^,。]+", r"[^,。]+一般", r"像[^,。]+", r"是[^,。]+"]:
                for pp in searchKwsArr:
                    kwsSeq = kwsSeq + 1
                    # for pp in [r".风.", r".香", r"一.", r".{2,4}(地)" , r"荷.", r".塘", r"月.", r".色"]:
                    lastFnd = "\t"
                    findCnt = 0
                    for m in re.finditer(
                        pp
                        , line
                        , flags=re.IGNORECASE):
                        findCnt += 1
                        # A new maximum hit count means a new output row
                        # must be opened for this line.
                        if findCnt > maxFnd:
                            maxFnd = findCnt
                            ptStrs.append(ptStrTmp)
                            resultPD_key = pd.concat([resultPD_key,resultPD_tmp], ignore_index=True)
                        ptStrs[findCnt-1] = ptStrs[findCnt-1] + pp + ": " + m.group() + "\t"
                        # resultPD_key.loc[findCnt-1,t_result_tmp+str(kwsSeq)] = pp + ": " + m.group()
                        resultPD_key.loc[findCnt-1,t_result_tmp+str(kwsSeq)] = m.group()
                        lastFnd = pp + ": " + m.group() + "\t"
                        hitNos.append(str(kwsSeq))
                        hitKws.append(pp)
                    # Disabled "carry the last hit downwards" mode; with it
                    # off, later rows simply get an empty column.
                    if False:
                        ptStrTmp = ptStrTmp + lastFnd
                    else:
                        ptStrTmp = ptStrTmp + "\t"
                # With the DataFrame version, single-keyword rows need no
                # extra padding here.
                notfnd = 0
                for fnd in ptStrs:
                    notfnd += 1
                    if notfnd > findCnt:
                        ptStrs[notfnd-1] = ptStrs[notfnd-1] + "\t"
                # Collect the per-line hit statistics (which regexes hit).
                fndNo = 0
                for fnd in ptStrs:
                    fndNo += 1
                    ptStrs[fndNo-1] = ptStrs[fndNo-1] + ";"+";".join(hitNos) +";"+ "\t" +";"+ ";".join(hitKws) +";"+ "\t"
                # for i in range(0,maxFnd-1):
                # Single-line search: join this line's result rows together.
                if maxFnd > 0:
                    finalStr = ""
                    for st in (ptStrs): finalStr = finalStr + st + line # + "\n"
                    finalStrArr.append(finalStr)
                    resultPD_key[t_hitNos]=";".join(hitNos)
                    resultPD_key[t_hitKws]="【"+"】;【".join(hitKws)+"】"
                    resultPD_key[t_lineContent]=line.replace("\n","").replace("\r","")
                    one_file_result = pd.concat([one_file_result,resultPD_key], ignore_index=True)
                # one_file_result = one_file_result.fillna({t_result_tmp+str(1):"b"})
                # writeToFile(filout, finalStr)
            # print(one_file_result)
            # one_file_result.columns[2].
            await asyncio.create_task(writeToFile(filout, finalStrArr, lock, one_file_result))
async def searchInFolder(basePath, filout, lock: Lock):
    """Recursively search *basePath*: one task per sub-folder and per file.

    Bug fixes:
    - Sub-folder tasks were created but never stored or awaited
      (fire-and-forget), so recursion could be left unfinished (and the
      task object garbage-collected); they are now awaited together with
      the file tasks.
    - `asyncio.wait` raises ValueError on an empty set, which happened
      for folders containing no files; now guarded.
    """
    tasklist = []
    for fo in getChildFolders(basePath):
        tasklist.append(asyncio.create_task(searchInFolder(basePath + fo + "/", filout, lock)))
    files = getChildFiles(basePath)
    for f in files:
        tasklist.append(asyncio.create_task(searchInFile(f, basePath, filout, lock)))
    if tasklist:
        await asyncio.wait(tasklist)
async def main():
    """Drive the whole search: write the configuration and column header
    to the text output, then scan every base path concurrently.

    Bug fix: the output file handle was opened and never closed; it is
    now managed by a `with` block so it is flushed and closed even if a
    search task raises.
    """
    lock = Lock()
    starttime = datetime.datetime.now()
    basePaths = ['/Volumes/SDCARD_01/tmp/']
    with open("/Volumes/SDCARD_01/output/"+"output.txt","w") as filout:
        # Echo the configuration at the top of the output file.
        filout.write("excFileType:" + "\n")
        filout.write("\t" + "\n\t".join(excFileType) + "\n")
        filout.write("incFileType:" + "\n")
        filout.write("\t" + "\n\t".join(incFileType) + "\n")
        filout.write("searchKwsArr:" + "\n")
        filout.write("\t" + "\n\t".join(searchKwsArr) + "\n")
        filout.write("basePaths:" + "\n")
        filout.write("\t" + "\n\t".join(basePaths) + "\n")
        # Column-title line, plus a second line showing each result
        # column's regex.
        titleStr = "lineNo\t"
        titleStrDes = "\t"
        resultNo = 1
        for kw in searchKwsArr:
            titleStr = titleStr + "result" + str(resultNo) + "\t"
            titleStrDes = titleStrDes + kw + "\t"
            resultNo = resultNo + 1
        titleStr = titleStr + "hitNos" + "\t" + "hitKws" + "\t" + "lineContent" + "\t"
        filout.write(titleStr + "\n")
        filout.write(titleStrDes + "\n")
        # One task per base path; wait for all of them to finish before
        # the file is closed.
        task_fol_list = []
        for basePath in basePaths:
            task_fol_list.append(asyncio.create_task(searchInFolder(basePath, filout, lock)))
        await asyncio.wait(task_fol_list)
    print('search complete!')
    print("start" + str(starttime))
    print("end " + str(datetime.datetime.now()))
# 2024-03-04 21:53:57.998985
# 2024-03-04 21:53:58.041339
# 2024-03-04 22:10:00.298639
# 2024-03-04 22:10:00.443002
# async
# 2024-03-04 21:55:17.430653
# 2024-03-04 21:55:17.490983
# lock
# 2024-03-04 22:07:11.735860
# 2024-03-04 22:07:11.850801
# 2024-03-04 22:11:36.540289
# 2024-03-04 22:11:36.595845
# create task
# start2024-03-04 22:40:18.462565
# end 2024-03-04 22:40:18.653983
if __name__ == "__main__":
    # Entry point: run the async search pipeline.
    # loop = asyncio.get_event_loop()
    # result = loop.run_until_complete(main())
    asyncio.run(main())
    # print(date.ctime())
def foldersSample():
    """Demo: print the sub-folders and files of a configured base path.

    Fix: the directory was listed twice (the `files` variable was
    computed and then getChildFiles was called again); the variable is
    now reused.
    """
    basePath = '您的检索文件夹的路径/'
    print("当前目录下的文件夹名称为:", getChildFolders(basePath))
    # print("当前目录下的文件夹名称为:", getChildFolders(basePath))
    files = getChildFiles(basePath)
    print("当前目录下的文件名称为:", files)
# TODO maybe rebind the shortcut: Ctrl + K
# TODO compare reading the whole file vs. line by line -- which is better?
# TODO pre-filter file names with a regex first; when a name matches more
#      than once (e.g. two "a"s), print it while testing.
# foldersSample()
def sample():
    """Scratchpad demonstrating the `re` module APIs used above.

    NOTE(review): `[o|a]` is a character class containing 'o', '|' and
    'a', so it also matches a literal '|'; `[oa]` is probably what was
    meant.  The patterns are kept as-is because this is only a demo.
    """
    pattern = re.compile("(d)[o|a](g)")
    matc = pattern.search("abcdogabcdagabc")  # match at index 3 ("dog")
    matc = pattern.search("abcdogabcdagabc",3)  # search starting at index 3
    matcs = re.findall(pattern, "abcdogabcdagabc", flags=0)
    print(re.findall(re.compile("c(d([o|a])g)"), "abcdogabcdagabc", flags=0))
    # Renamed from "iter" so the builtin is not shadowed.
    it = re.finditer(re.compile("c(d([o|a])g)"), "abcdogabcdagabc", flags=0)
    for m in re.finditer(
        "c(d([o|a])g)"
        , "abcdogabcdagabc"
        , flags=re.IGNORECASE):
        print(m.group())
        for g in m.groups():
            print(g)
        print(m.span())
    # findall covers most needs, but it does not report match indexes.
    print(re.match(r'l','liuyan1').group())
    print(re.match(r'y','liuyan1'))
    print(re.search(r'y','liuyan1').groups())
    pattern.search("dog", 1)  # No match; search doesn't include the "d"
# sample()
# 协程使用方法
# asyncio walkthrough
# https://realpython.com/async-io-python/
# Coroutines and Tasks官网文档
# https://docs.python.org/3/library/asyncio-task.html
# async def main2():
# print('hello')
# await asyncio.sleep(1)
# print('world')
# loop = asyncio.get_event_loop()
# result = loop.run_until_complete(main2())