Commit 63de4116 authored by ParanoiaSYT

modify

Parent c5b8faea
@@ -5,7 +5,7 @@
 ## Search path (a list of directories); the site-packages location is reserved for installed modules
 import sys
 print(sys.path)
-sys.path.append('/Users/sunyuting/YuC-Study/自制模块') # append the desired directory so modules there can be imported
+# sys.path.append('/Users/sunyuting/YuC-Study/自制模块') # append the desired directory so modules there can be imported
 print(sys.path)
 import hello
...
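For reference, the comment above names the two usual ways to make a directory like 自制模块 importable; a minimal sketch (the path is taken from the commented-out line, everything else is illustrative):

import sys, site

custom_dir = '/Users/sunyuting/YuC-Study/自制模块'   # same directory as the commented-out line
if custom_dir not in sys.path:
    sys.path.append(custom_dir)        # temporary: only affects the current interpreter session

print(site.getsitepackages())          # the permanent home for modules mentioned in the comment
import hello                           # resolvable once its directory is on sys.path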
@@ -14,9 +14,12 @@ for row in ws.iter_rows(min_col=2,min_row=2,max_col=5,max_row=5):
     # row is one worksheet row; row[3] is its 4th cell (the index counts from min_col)
     ws[row[3].coordinate]='=SUM(%s:%s)'%(row[0].coordinate,row[2].coordinate)
 center_alignment=openpyxl.styles.Alignment(horizontal='center',vertical='center')
 for row in ws.iter_rows(min_col=2,min_row=2,max_col=6,max_row=5):
     ws[row[4].coordinate]='=IF(%s>250,"A","B")'%(row[3].coordinate)
+    # IF formula: the cell gets "A" when the condition holds, otherwise "B"
     # in Excel formulas, only double quotes are accepted around string literals
     ws[row[4].coordinate].alignment=center_alignment
...
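The hunk above only shows the formula loops; for context, a self-contained sketch of the whole step (the workbook and output file names are placeholders, not from the commit):

import openpyxl
from openpyxl.styles import Alignment

wb = openpyxl.load_workbook('data.xlsx')          # hypothetical input workbook
ws = wb.active

center_alignment = Alignment(horizontal='center', vertical='center')

for row in ws.iter_rows(min_col=2, min_row=2, max_col=6, max_row=5):
    # row[0]..row[2] are the data cells, row[3] gets the SUM, row[4] the A/B grade
    ws[row[3].coordinate] = '=SUM(%s:%s)' % (row[0].coordinate, row[2].coordinate)
    ws[row[4].coordinate] = '=IF(%s>250,"A","B")' % row[3].coordinate
    ws[row[4].coordinate].alignment = center_alignment

wb.save('data_with_formulas.xlsx')                # hypothetical output name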
import matplotlib.pyplot as plt
import numpy as np
for i in range(10,15):
    x=np.linspace(0,i,11)
    y=np.linspace(0,i,11)
    plt.figure()
    plt.plot(x,y)
    plt.xlabel("x轴")
    plt.ylabel('y轴')
    plt.show()
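One note on this new plotting script: matplotlib's default font has no CJK glyphs, so the "x轴"/"y轴" labels typically render as empty boxes. A minimal sketch of the usual fix, assuming a CJK font such as SimHei is installed on the machine:

import matplotlib.pyplot as plt
import numpy as np

plt.rcParams['font.sans-serif'] = ['SimHei']   # assumed installed CJK font; swap in any local one
plt.rcParams['axes.unicode_minus'] = False     # keep the minus sign renderable with that font

x = np.linspace(0, 10, 11)
plt.figure()
plt.plot(x, x)
plt.xlabel("x轴")
plt.ylabel("y轴")
plt.show()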
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import random
import time

def url_open(url):
    headers={'User-Agent':UserAgent().random}
    response=requests.get(url,headers=headers,timeout=20)
    html=response.text
    return html

def content_get():
    number=0
    paper=[]
    for i in range(20):
        number+=1
        # % is the escape character here: %% in the format string comes out as a literal %
        url="https://journals.aps.org/prl/recent?toc_section%%5B%%5D=general-physics-statistical-and-quantum-mechanics-quantum-information-etc&page=%d " % number
        html=url_open(url)
        soup=BeautifulSoup(html,"lxml")
        title_list=soup.find_all("h5",class_="title")
        for j in range(10):
            try:
                title=title_list[j].a.text
                if "qubit" in title:
                    paper.append(title)
            except:
                pass
        # time.sleep(2)
    return paper

paper=content_get()
for k in range(len(paper)):
    print(paper[k])
\ No newline at end of file
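The new scraper fixes each page at range(10) titles and hides index errors behind a bare except; a hedged sketch of an alternative loop that walks whatever the page actually returns and keeps the polite delay enabled (function and parameter names below are mine, not from the commit):

import time
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

def filter_titles(html, keyword="qubit"):
    # Collect every matching title on the page instead of assuming exactly 10 entries.
    soup = BeautifulSoup(html, "lxml")
    hits = []
    for h5 in soup.find_all("h5", class_="title"):
        if h5.a and keyword in h5.a.text:
            hits.append(h5.a.text)
    return hits

def content_get(pages=20, delay=2):
    paper = []
    for number in range(1, pages + 1):
        # %% survives %-formatting as a literal %, giving the URL-encoded [] in toc_section[]
        url = ("https://journals.aps.org/prl/recent?toc_section%%5B%%5D="
               "general-physics-statistical-and-quantum-mechanics-quantum-information-etc"
               "&page=%d") % number
        response = requests.get(url, headers={'User-Agent': UserAgent().random}, timeout=20)
        paper.extend(filter_titles(response.text))
        time.sleep(delay)                        # be polite between requests
    return paper

if __name__ == '__main__':
    for title in content_get(pages=2):           # small page count for a quick test
        print(title)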
@@ -14,7 +14,9 @@ def url_open(url):
 def content_return():
     url_list=[
-        'https://www.nature.com/search?article_type=protocols,research,reviews&subject=physics'
+        'https://www.nature.com/search?article_type=protocols,research,reviews&subject=physics',
+        'https://arxiv.org/list/quant-ph/new',
     ]
     url0=url_list[0]
     html0=url_open(url0)
@@ -27,7 +29,18 @@ def content_return():
         link='https://www.nature.com'+title_list[i].a['href']
         date=date_list[i].text.strip().replace('\n','')
         items0.append([title,date,link])
+    # url1=url_list[1]
+    # html1=url_open(url1)
+    # soup1=BeautifulSoup(html1,"lxml")
+    # title_list=soup1.find_all("div",class_="list-title mathjax")
+    # for i in range(len(title_list)):
+    #     title=title_list[i].text.strip()
+    #     print(title)
     # print(items0)
+    # print(title_list)
     return items0
 if __name__=='__main__':
...
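The commented-out block above sketches the arXiv branch; a self-contained version of what it could return, assuming /list/quant-ph/new still wraps each title in a div with class "list-title mathjax" and prefixes it with a "Title:" label:

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

url1 = 'https://arxiv.org/list/quant-ph/new'
html1 = requests.get(url1, headers={'User-Agent': UserAgent().random}, timeout=20).text
soup1 = BeautifulSoup(html1, "lxml")

items1 = []
for div in soup1.find_all("div", class_="list-title mathjax"):
    title = div.text.strip()
    if title.startswith("Title:"):               # drop the label the listing prepends, if present
        title = title[len("Title:"):].strip()
    items1.append(title)
print(items1)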
# number=0
# str1='https://journals.aps.org/prl/recent?toc_section%%d5B%%5D=general-physics-statistical-and-quantum-mechanics-quantum-information-etc&page=%d'%number
# print(str1,range(10))
# for i in range(10):
#     # number+=1
#     print(i)
#     # print(number)
#
#
# if "qu" in "opshjyu" or "op" in :
#     print("good")
import numpy as np
s1=np.linspace(0,9,10)
print(s1,type(s1))
print(s1[1])
for i in range(-20,-18):
    print(i)
    print("fish%d"%i)
\ No newline at end of file
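The scratch lines above poke at %-escaping and np.linspace; a tiny check of the two behaviours they explore:

import numpy as np

number = 3
url = 'page=%d&sec%%5B%%5D=x' % number
print(url)                             # -> page=3&sec%5B%5D=x : %% comes out as a literal %

s1 = np.linspace(0, 9, 10)
print(len(s1), s1[0], s1[1], s1[-1])   # 10 points, endpoints included: 0.0, 1.0, ..., 9.0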