# study_tushre.py
import tushare as ts
import pandas as pd
# print(ts.__version__)
# ts.sz_margins(start='2015-01-01', end='2015-04-19')
# df = ts.profit_data(top=60)
# df.sort_values('shares',ascending=False)
def get_stock_scope():
    """Fetch monthly bars for stock 600001 and print rows with close>=10 and open>=9.

    Side effects: writes ./data/上证收盘.csv and prints the filtered frame.
    """
    # tushare data API official site: http://www.waditu.cn/
    # ktype: D=daily, W=weekly, M=monthly, 15/30/60 = minute bars; default is D.
    # NOTE(fix): the original end date '2018-13-31' is not a valid date (month 13);
    # corrected to '2018-12-31'.
    stock1 = ts.get_hist_data('600001', '1995-01-01', '2018-12-31', ktype='M')
    # The date lives in the frame's index; round-trip through CSV so it
    # surfaces as an ordinary column we can label and filter on.
    sh = stock1[['close', 'open']]
    sh.to_csv("./data/上证收盘.csv")
    sh = pd.read_csv("./data/上证收盘.csv")
    sh.columns = ['date', 'close', 'open']
    print(sh[(sh['close'] >= 10) & (sh['open'] >= 9)])
def test3():
    """Fetch monthly bars for 399001 and keep only the December (year-end) rows."""
    # regex matching reference: https://blog.csdn.net/yyhhlancelot/article/details/82228803
    # NOTE(fix): end date '2018-13-31' was invalid (month 13) -> '2018-12-31'.
    stock1 = ts.get_hist_data('399001', '1995-01-01', '2018-12-31', ktype='M')
    sh = stock1[['close']]
    sh.to_csv("./data/深证收盘.csv")
    sh = pd.read_csv("./data/深证收盘.csv")
    sh.columns = ['date', 'close']
    # '12?' matches month '1' or '12'; with zero-padded dates (e.g. '2018-01-31')
    # only December matches.  Renamed from `bool`, which shadowed the builtin.
    is_december = sh['date'].str.contains(r'[0-9]{4,}-12?-[0-9]{2,}')
    print('bool : \n', is_december)
    print(sh[is_december])
    sh[is_december].to_csv('./data/深证年末_第三题.csv')
def test4():
    """Fetch monthly bars for 000001 and keep only the December (year-end) rows."""
    # regex matching reference: https://blog.csdn.net/yyhhlancelot/article/details/82228803
    # NOTE(fix): end date '2018-13-31' was invalid (month 13) -> '2018-12-31'.
    stock2 = ts.get_hist_data('000001', '1995-01-01', '2018-12-31', ktype='M')
    sh2 = stock2[['close']]
    sh2.to_csv("./data/上证收盘.csv")
    sh2 = pd.read_csv("./data/上证收盘.csv")
    sh2.columns = ['date', 'close']
    # '12?' matches month '1' or '12'; with zero-padded dates only December matches.
    is_year_end = sh2['date'].str.contains(r'[0-9]{4,}-12?-[0-9]{2,}')
    print('bool2 : \n', is_year_end)
    print(sh2[is_year_end])
    sh2[is_year_end].to_csv('./data/上证年末_第三题.csv')
def test():
    """Dump monthly close/open bars for 600001 to ./data/test.csv."""
    # NOTE(fix): end date '2018-13-31' was invalid (month 13) -> '2018-12-31'.
    stock1 = ts.get_hist_data('600001', '1995-01-01', '2018-12-31', ktype='M')
    df = stock1[['close', 'open']]
    df.to_csv("./data/test.csv")
    # print(ts.get_realtime_quotes('sh'))
def fliter_end_of_month():
    """2019-11-20 question 3: month-end closes of the SZ composite (399106), 2014-2018.

    Known issue (from the original note): decimals differ from the training
    platform's data, so the platform export is used for the answer instead.
    """
    # NOTE(fix): start date '2013-12-32' was invalid (day 32) -> '2013-12-31'.
    df = ts.get_hist_data('399106', '2013-12-31', '2018-12-31', ktype='M')
    df1 = df[['close']]
    # Round-trip through CSV so the date index becomes a labelable column.
    df1.to_csv("./data/深证综指月末收盘价tushare.csv")
    df1 = pd.read_csv("./data/深证综指月末收盘价tushare.csv")
    df1.columns = ['date', 'close']
    print(df1)
    df1.to_csv("./data/深证综指月末收盘价tushare.csv")
def fliter_end_of_month_fromxls():
    """Clean the 2014-2018 monthly data exported from the training platform,
    keeping the last available trading day of each month.

    Side effects: reads the .xls export, prints the selected rows (also reversed).
    """
    df = pd.read_excel("./data/深证综指(399106)201911200124278806.xls")
    df.columns = ['date', 'close']  # label the columns so we can select by name
    df['date'] = pd.to_datetime(df['date'])  # parse to datetime
    df = df.set_index('date')  # use the date as the row index
    # Last date PRESENT in the data for each month (the last trading day,
    # not necessarily the calendar month end).
    end_of_month_list = []
    for year in range(2014, 2019):
        for month in range(1, 13):
            # NOTE(fix): row selection via df['2014-1'] (partial-string getitem)
            # was removed in pandas 2.0; .loc is the supported form.
            end_of_month = str(df.loc['{0}-{1}'.format(year, month)].index.max())[0:10]
            end_of_month_list.append(end_of_month)
    print(end_of_month_list)
    # NOTE(fix): concatenate once instead of calling pd.concat inside the loop,
    # which copied the accumulated frame on every iteration (O(n^2)).
    # The d:d slice keeps each selection as a one-row DataFrame.
    df_sum = pd.concat([df.loc[d:d] for d in end_of_month_list])
    print(df_sum)
    print(df_sum.reindex(index=df_sum.index[::-1]))  # print in reversed order
def get_month_close():
    """Huatai Securities, question 12: monthly back-adjusted closes for 002033."""
    # NOTE(fix): start='2015-0-0' is not a valid date -> '2015-01-01'.
    xian_tourism = ts.get_k_data(code='002033', ktype='M', start='2015-01-01', end='2017-12-31', autype='hfq')  # hfq = back-adjusted
    date_close = xian_tourism[['date', 'close']]
    print(date_close.reindex(index=date_close.index[::-1]))  # print in reversed order
def get_data_and_reverse():
    """Huatai Securities, question 12: reverse the row order of the exported sheet
    and save it as CSV."""
    source = 'C:/Users/Admin/Downloads/上证综指000001201911231952573857.xls'
    target = 'C:/Users/Admin/Downloads/上证综指000001201911231952573858.csv'
    frame = pd.read_excel(source)
    frame = frame.reindex(index=frame.index[::-1])  # reverse the rows
    frame.to_csv(target)
def shen_yang_close():
    """SIASUN Robot (300024), question 8: collect the 2013-2018 year-end
    unadjusted closes via get_k_data and hand them to the year-end extractor."""
    # Question 2 (kept for reference): unadjusted close on 2018-12-28.
    # df = ts.get_k_data('300024', ktype='M', start='2018-12-1', end='2019-2-1', autype=None)
    # print(df)
    frame = ts.get_k_data('300024', ktype='M', start='2013-12-1', end='2018-12-31', autype=None)
    subset = frame[["date", "close"]]
    get_end_of_year_close_from_DataFrame(subset)
def get_end_of_year_close_from_DataFrame(df, start_year=2013, end_year=2018):
    """Extract the last available December close for each year in the range.

    Parameters
    ----------
    df : DataFrame with at least 'date' (parseable to datetime) and 'close' columns.
    start_year, end_year : inclusive year range; defaults (2013-2018) preserve the
        original hard-coded behaviour. Every year must have December rows.

    Returns
    -------
    DataFrame with columns ['date', 'close'], one row per year
    (the frame is also printed, preserving the original behaviour).
    """
    # Copy so we never mutate the caller's frame (avoids SettingWithCopy too).
    df = df[['date', 'close']].copy()
    df['date'] = pd.to_datetime(df['date'])
    df = df.set_index('date')
    end_of_month_list = []
    for year in range(start_year, end_year + 1):
        # Last date PRESENT in December of that year — the last trading day,
        # which may not be the calendar month end.
        # NOTE(fix): partial-string row getitem (df['2013-12']) was removed in
        # pandas 2.0; .loc is the supported form.
        end_of_month = str(df.loc['{0}-{1}'.format(year, 12)].index.max())[0:10]
        end_of_month_list.append(end_of_month)
    print("end_of_month_list:", end_of_month_list)
    print(df.loc[end_of_month_list[0], 'close'])  # scalar lookup: row label + column
    data = [df.loc[d, 'close'] for d in end_of_month_list]
    df_sum = pd.DataFrame({'date': end_of_month_list, 'close': data})
    print(df_sum)
    print(df_sum.reindex(index=df_sum.index[::-1]))  # print in reversed order
    return df_sum
def stock_ups_and_downs_percent():
    """Print the period-over-period relative change of consecutive values.

    Returns the list of formatted change strings (also printed, one per line).

    NOTE(review): the original assigned two different lists to `a` back to
    back, so the first (index-point) list was dead code; only the second list
    is used, preserved here. Confirm which series was actually intended.
    """
    # a = [1304.44, 1471.76, 2714.05, 1962.06, 1752.65, 1250.53]  # dead in the original
    a = [48.7, 39.39, 68.50, 21.38, 18.82, 13.22]
    changes = []
    for prev, curr in zip(a, a[1:]):
        changes.append("{:.5}".format((curr - prev) / prev))
    for change in changes:
        print(change)
    return changes
# --- script entry: uncomment one of the calls below to run that exercise ---
# test3()
# get_stock_scope()
# test4()
# test()
# fliter_end_of_month()
# fliter_end_of_month_fromxls()
# get_month_close()
# get_data_and_reverse()
shen_yang_close()  # the only call currently enabled; fetches data via tushare
# stock_ups_and_downs_percent()
'''
ERROR1: 'DataFrame' object has no attribute 'sort'
solution:https://blog.csdn.net/weixin_39777626/article/details/78760076
解决:将“sort”改为“sort_values”
ERROR2:用tushare从网上搞的股票数据小数点会和那个大数据中心的不一样,会出错,所以要先从大数据实训平台搞出数据,
然后再用pandas清洗
'''
# python_basic.py
'''
###基本使用
print("hello world")
print(int('2')+5.5)
print(float('2.3')+3)
print("***********")
1+1
1/9
a = 1
t=0
###while与for的使用
while t!=3: #缩进表示括号 !!!冒号不能缺!!!
a+=2
t+=1
print(t)
print(a)
example_list = [1,2,3,4,5,6,7,12,543,876,12,3,2,5]
for i in example_list: #!!!冒号不能缺!!!
print(i)
print('inner of for')
print('outer of for')
...
###列表list
list1 = [1,2,3,4,5,11,7,8,9,34,35] #创建一个表
list2 = [11,22,33,44,55,66,77,88]
for i in range(1,10,2): #输出序列区间【1,9)左闭右开,的数字!!!冒号不能缺!!!
print(i)
a = 'fuckyou'
print(a*2) #连续输出两遍
print(len(a)) #读取长度
###文件读取
text = 'fuck you #1 \nfuck you #2 \nfuck you #3 \nendline'
a_file = open('123.txt','w')
a_file.write(text)
a_file.close
read_file = open('123.txt','r') #打开一个名为123的文件
#content = read_file.readline() #读取第一行
#content2 = read_file.readline() #读取第二行
#print(content,content2) #输出第一第二行
print(-------)
all = read_file.readlines() #按行读取
#print(all) #如果直接输出就会很难看
print(-------)
for item in all:
print(item) #
print(-------)
read_file.close #记得关闭文件
###类
class Calculator:
name='good calculator'
price=18
def __init__(self,name,price,height,width,weight): # 注意,这里的下划线是双下划线
self.name=name
self.price=price
self.h=height
self.wi=width
self.we=weight
#self用于实例化对象
#详见https://blog.csdn.net/daocaoren1543169565/article/details/80626035
'''
'''
###input输入
scan = input('please input a number:\n');
print(scan)
#因为下面它通过scan2的值作为判断条件,所以返回值必为int
scan2 = int(input('please input the floor that you want to arrive:\n'))
if scan2 >=5: #!!!注意冒号!!!不过你在Spyder里编程它会自动帮你添加冒号
print('Too high!')
elif scan2 == 4:
print('level 4 arrived!')
elif scan2 == 3:
print('level 3 arrived!')
else:
print('Go to the hell!')
print('傻逼')
'''
'''
#快速排序
def quicksort(arr):
if len(arr) <= 1:
return arr
pivot = arr[len(arr) // 2]
left = [x for x in arr if x < pivot]
middle = [x for x in arr if x == pivot]
right = [x for x in arr if x > pivot]
return quicksort(left) + middle + quicksort(right)
print(quicksort([3,6,8,10,1,2,1]))
'''
'''
from numpy import *
a=eye(4)
print a
'''
'''
###range的用法
a = [1,2,3,4,5]
for i in range(0,10,3):
print(i)
'''
'''
import re
# 解释p = re.compile(r'\d+')
# 查找数字
# r表示字符串不转义?(反斜杠)\d表示一个数字,后边这个加号表示至少出现一次数字,
# 整句话表示把字符串中的连着的数字compile出来
p = re.compile(r'\d+')
# 在字符串中"oneldfdsfds3134213efds324324dfdsfdsfds1"中进行查找,按照规则p制定的正则进行查找
# 返回结果是None表示没有找到,否则会返回。
m = p.match("onel2fdsfds3134213efds324324dfdsfdsfds1")
print (m)
import math
print(math.fabs(-1))
'''
def create_turple():
    """Show that a trailing comma alone creates a one-element tuple."""
    single = 3,  # the comma, not parentheses, makes the tuple: (3,)
    print(single)
# Sort the list ascending by the length of each element's string form;
# equal-length values keep their relative order (stable sort).
def study_lambda():
    """Exercise lambda with list sorting, map and a small variance formula."""
    alist = [1, 2, 4, 6, 11, 13, 45, 56]
    alist = list(reversed(alist))
    print('逆序排列a列表后:', alist)
    # Stable sort keyed on the decimal-string length.
    alist = sorted(alist, key=lambda v: len(str(v)))
    print('使用lambda按长度排列alist之后:', alist)
    a = [1, 2, 3, 4]
    # NOTE(review): the label says 2^n but the lambda computes i squared.
    print('使用lambda、map输出2^n', list(map(lambda i: i**2, a)))
    square_loss = lambda x, average: sum([(i-average) ** 2 for i in x])*1.0/len(x)
    print('使用lambda计算方差:', square_loss(x=[1, 2, 3], average=2))
    b = [('wang', 1), ('yang', 5), ('li', 2)]
    b = sorted(b, key=lambda pair: pair[1])  # order by the second tuple element
    print('使用lambda来迭代排序:', b)
def list_deduction():  # list-comprehension practice
    """Collect negatives from nested lists and transpose a matrix two ways."""
    a = [[1, 3, 5, 56, -2, -6, 2], [3, -3, 4, -6]]
    negatives = []
    for row in a:
        for value in row:
            if value < 0:
                negatives.append(value)
    print('列表推导式重写函数:', negatives)
    vec = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    transposed = [[row[col] for row in vec] for col in range(4)]
    print('转置矩阵:', transposed)
    # zip over the unpacked rows also transposes (yields tuples).
    print('序列解包转置矩阵:', list(zip(*vec)))
def list_unpack_package():  # sequence-unpacking practice
    """Unpacking demos: dict items/values, starred expressions, tuple building."""
    s = {'a':1, 'b':2, 'c':3}
    # Unpacking a dict's views follows insertion order (guaranteed on 3.7+).
    b, c, d = s.items()
    print('b =', b, 'c =', c, 'd =', d)
    b, c, d = s.values()
    print('b =', b, 'c =', c, 'd =', d)
    print('序列解包*[1, 2, 3], 4:', *[1, 2, 3], 4)
    print('序列解包*range(4):', *range(4))
    print('序列解包**{"a":1, **{"b":2}}:', {'a': 1, **{'b': 2}})
    # tuple(range(10)) is equivalent to the starred form `*range(10),`.
    range_tuple = tuple(range(10))
    print('序列解包*range(10):', range_tuple)
def dict_use():
    """Count character frequencies with dict.get, first over a random
    1000-character string, then over 'apple'."""
    import string
    import random
    alphabet = string.ascii_letters + string.digits + string.punctuation
    z = ''.join(random.choice(alphabet) for _ in range(1000))
    counts = dict()  # char -> occurrence count
    for ch in z:
        counts[ch] = counts.get(ch, 0) + 1
    print(counts)
    counts = dict()
    for ch in 'apple':
        counts[ch] = counts.get(ch, 0) + 1
    print(counts)
def sorted():
    """Sort a phonebook dict by value and by key and print both orders.

    NOTE(fix): this function shadows the builtin `sorted` at module level, so
    the calls inside the body resolved to this function itself and raised
    TypeError at runtime.  Route through `builtins.sorted` to restore the
    intended behaviour (renaming the function would break existing callers).
    """
    import builtins
    from operator import itemgetter
    phonebook = {'Linda': '7750', 'Bob': '9345', 'Carol': '5834'}
    # by value -> [('Carol', '5834'), ('Linda', '7750'), ('Bob', '9345')]
    print(builtins.sorted(phonebook.items(), key=itemgetter(1)))
    # by key   -> [('Bob', '9345'), ('Carol', '5834'), ('Linda', '7750')]
    print(builtins.sorted(phonebook.items(), key=itemgetter(0)))
def map_use():
    """Convert a list of numeric strings to ints and print the result."""
    digits = ['1', '2', '3']
    as_ints = [int(value) for value in digits]
    print(as_ints)
def split_patition():
    """Compare str.split, str.partition/rpartition and whitespace splitting."""
    s = "apple,peach,banana,pear"
    li = s.split(",")  # ["apple", "peach", "banana", "pear"] (kept, not printed)
    print(s.partition(','))        # ('apple', ',', 'peach,banana,pear')
    print(s.rpartition(','))       # ('apple,peach,banana', ',', 'pear')
    print(s.rpartition('banana'))  # ('apple,peach,', 'banana', ',pear')
    s = "2014-10-31"
    pieces = s.split("-")
    print(pieces)                    # ['2014', '10', '31']
    print([int(p) for p in pieces])  # [2014, 10, 31]
    # With no separator, split()/rsplit() treat any run of whitespace
    # (spaces, newlines, tabs, ...) as one delimiter; rsplit scans from the right.
    for sample in ('hello world \n\n My name is Dong ',
                   '\n\nhello world \n\n\n My name is Dong ',
                   '\n\nhello\t\t world \n\n\n My name\t is Dong '):
        print(sample.split())  # ['hello', 'world', 'My', 'name', 'is', 'Dong']
def lower_upper_capitalize_title_swapcase():
    """Tour the case-conversion methods on one sample string."""
    s = "What is Your Name?"
    # lower / upper / capitalize / title / swapcase, printed in that order.
    for converted in (s.lower(), s.upper(), s.capitalize(), s.title(), s.swapcase()):
        print(converted)
def replace():
    """Mask every sensitive word found in the text with '***'."""
    words = ('测试', '非法', '暴力', '话')
    text = '这句话里含有非法内容'
    for word in words:
        # str.replace is a no-op when the word is absent, so no guard needed.
        text = text.replace(word, '***')
    print(text)
    # '这句***里含有***内容'
def maketrans_translate():
    """Replace many single characters at once via maketrans()/translate().

    translate() applies the whole mapping in one pass, which a chain of
    replace() calls cannot do for several different characters at once.
    """
    # Map each of "abcdef123" to the corresponding character in "uvwxyz@#$".
    mapping = str.maketrans('abcdef123', 'uvwxyz@#$')
    s = "Python is a greate programming language. I like it!"
    print(s.translate(mapping))
    # 'Python is u gryuty progrumming lunguugy. I liky it!'
def regular_expression():
    """re.split demos with character-class patterns."""
    # reference: https://blog.csdn.net/panda_AJ/article/details/71043200
    import re
    sample = "2019+20+10 1/30,123"
    print(re.split('[+/ ,]', sample))  # split on any of + / space ,
    sample = '12CNY'
    # NOTE(review): '[(CNY)|(USD)]' is a character CLASS — it splits on any of
    # the characters ( ) | C N Y U S D individually, not on 'CNY'/'USD' as words.
    print(re.split('[(CNY)|(USD)]', sample))
def python_sql():
    """pandas equivalent of a SQL filter + GROUP BY + COUNT, plus iloc basics."""
    # source: https://zhuanlan.zhihu.com/p/37856914
    import pandas as pd
    import numpy as np
    raw = {'one': [1, 1, 1, 5, 6, 7, 8], 'two': [1, 1, 2, 6, 7, 8, 9], 'three': [1, 1, 2, 2, 2, 2, 2]}
    data1 = pd.DataFrame(raw, index=list('abcdefg'))
    print("data1:", data1)
    # like SQL: select two, one, count(*) from data1 where three == 1 group by two, one
    print(data1[data1["three"] == 1].groupby(by=['two', 'one']).size())
    print(data1.iloc[0, 0])  # iloc[row, col], zero-based
    data1['month'] = 12  # add a new constant column
def pandas_dataFrame_merge_sql():
    """Chain two left merges (pd.merge joins exactly two frames per call)."""
    import pandas as pd
    df1 = pd.DataFrame({'key': range(0, 5), 'd1': range(3, 8)}, index=list('abcde'))
    df2 = pd.DataFrame({'key': range(6, 11), 'd2': range(13, 18)}, index=list('abcde'))
    df3 = pd.DataFrame({'key': range(6, 11), 'd3': range(13, 18)}, index=list('abcde'))
    result = pd.merge(df2, df1, how='left', on='key')
    print("data2和data1表连接,data2为基准:", result)
    # To keep several join keys, pass on=['key1', 'key2'].
    result = pd.merge(result, df3, how='left', on='key')
    print("result和data3表连接,result为基准:", result)
# --- demo entry point: uncomment one of the calls below to run that exercise ---
# study_lambda()
# list_deduction()
# list_unpack_package()
# create_turple()
# dict_use()
# sorted()
# map_use()
# lower_upper_capitalize_title_swapcase()
# replace()
# regular_expression()
# python_sql()
pandas_dataFrame_merge_sql()  # the only demo currently enabled
# bp1.py
import pandas as pd
import numpy as np
# data source: https://tianchi.aliyun.com/dataset/dataDetail?dataId=46
# behavior_type: click, collect, add-to-cart and payment -> 1, 2, 3, 4 respectively.
df = pd.read_csv("./data/tianchi_mobile_recommend_train_user.csv")
df = df[["user_id", "item_id", "behavior_type"]]  # keep only the columns we use

def _count_behavior(frame, behavior, column_name):
    """Count rows per (user_id, item_id) for one behavior_type value.

    Like SQL: select user_id, item_id, count(*) from frame
              where behavior_type == behavior group by user_id, item_id.
    """
    counted = frame[frame["behavior_type"] == behavior].groupby(
        by=["user_id", "item_id"], as_index=False).count()
    # as_index=False keeps user_id/item_id as columns; the old behavior_type
    # column now holds the row count, so rename it to something meaningful.
    counted.columns = ['user_id', 'item_id', column_name]
    return counted

df_addClick = _count_behavior(df, 1, 'count_click')
# NOTE(fix): the original merged the RAW rows for behaviors 2/3/4 (and even
# assigned df_add_to_cart twice); raw duplicate (user, item) rows multiply the
# result in a merge. Count them the same way as the clicks before joining.
df_collect = _count_behavior(df, 2, 'collect')
df_add_to_cart = _count_behavior(df, 3, 'add_to_cart')
df_payment = _count_behavior(df, 4, 'payment')
# Left-join the four tables on (user_id, item_id), clicks as the base.
df_result = pd.merge(df_addClick, df_collect, how='left', on=['user_id', 'item_id'])
df_result = pd.merge(df_result, df_add_to_cart, how='left', on=['user_id', 'item_id'])
df_result = pd.merge(df_result, df_payment, how='left', on=['user_id', 'item_id'])
df_result.to_csv("./data/result1.csv")