机器学习常见函数

Counter:

from collections import Counter
>>> c = Counter('abcdeabcdabcaba')  # count elements from a string
>>> c
Counter({'a': 5, 'b': 4, 'c': 3, 'd': 2, 'e': 1}) # 注意:并不会自动排序;repr 按键首次插入的顺序显示(Python 3.7+),这里恰好与计数降序一致

>>> c.most_common(3)                # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c)                       # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements()))   # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values())                 # total of all counts
15

>>> c['a']                          # count of letter 'a'
5
>>> for elem in 'shazam':           # update counts from an iterable
...     c[elem] += 1                # by adding 1 to each element's count
>>> c['a']                          # now there are seven 'a'
7
>>> del c['b']                      # remove all 'b'
>>> c['b']                          # now there are zero 'b'
0

>>> d = Counter('simsalabim')       # make another counter
>>> c.update(d)                     # add in the second counter
>>> c['a']                          # now there are nine 'a'
9

>>> c.clear()                       # empty the counter
>>> c
Counter()

Note:  If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:

>>> c = Counter('aaabbc')
>>> c['b'] -= 2                     # reduce the count of 'b' by two
>>> c.most_common()                 # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]

enumerate:

a = [1,2,3,4]
for i in enumerate(a):
    print(i)
(0, 1)
(1, 2)
(2, 3)
(3, 4)
#-----------------------
b = [4,3,2,1]
for j in enumerate(b):
    print(j)
(0, 4)
(1, 3)
(2, 2)
(3, 1)

类继承

class WordEmbeddingDataset(tud.Dataset):
    """Minimal skeleton of a ``torch.utils.data.Dataset`` subclass.

    Shown here to illustrate class inheritance: the subclass initializer
    must invoke the parent initializer via ``super()``.
    """

    def __init__(self, text, word_to_idx, idx_to_word, word_freq, word_counts):
        # Python 3 zero-argument form; equivalent to the legacy
        # super(WordEmbeddingDataset, self).__init__() spelling.
        super().__init__()
        # NOTE(review): the parameters are accepted but not stored in this
        # excerpt — presumably the full class assigns them to attributes
        # (self.text, self.word_to_idx, ...); kept as-is to match the original.

类的继承

multinomial

torch.multinomial(input, num_samples, replacement=False, out=None) -> LongTensor

from torch import multinomial
import torch
a_words = ['a','b','c','d','e','f','g']
a = [1,2,3,5,6,7,10,0]
a = torch.Tensor(a)
res = multinomial(a,num_samples=8,replacement=False)
print(res)
->RuntimeError: invalid multinomial distribution (with replacement=False, not enough non-negative category to sample)
# 权重只能取非负数;且 replacement=False(不放回)时,正权重的类别数必须 >= num_samples。这里 8 个权重中只有 7 个为正,却要采 8 个,故报错-------------------------------------
a = [[1,2,3,5,6,7,10,0],[1,9,2,4,5,6,7,2]]
a = torch.FloatTensor(a)
res = multinomial(a,num_samples=7,replacement=False)
print(res)
->tensor([[3, 6, 1, 4, 5, 2, 0],
        [1, 2, 4, 7, 3, 5, 6]])
# a有两行,每行按权重大小不放回地采样 num_samples(7) 个下标(返回的是下标而不是元素值,权重越大越先被采到),形成2*7的tensor-------
a = [1,2,1,2,3,1,1,1,100]
a = torch.FloatTensor(a)
res = multinomial(a,num_samples=10,replacement=True)
print(res)
->tensor([8, 8, 8, 8, 8, 8, 8, 8, 8, 8])
# replacement=True代表有放回,此时100很大,基本都返回的是100的下标

https://blog.csdn.net/monchin/article/details/79787621

squeeze()和unsqueeze()

# Demo: unsqueeze() inserts a size-1 dimension; squeeze() removes one.
import torch as t
a = t.arange(0,6)
b = a.view(2,3)
print('b:',b,b.shape)
c = b.unsqueeze(1)   # insert a new size-1 dimension at index 1: (2,3) -> (2,1,3)
print('c:',c,c.shape)
d = c.squeeze(2) # no-op: dim 2 has size 3, and squeeze() only removes dims of size 1
print('d1',d,d.shape)
d = c.squeeze(1) # dim 1 has size 1, so it is removed: (2,1,3) -> (2,3)
print('d2',d,d.shape)

> b: tensor([[0, 1, 2],
        [3, 4, 5]]) torch.Size([2, 3])
c: tensor([[[0, 1, 2]],

        [[3, 4, 5]]]) torch.Size([2, 1, 3])
d1 tensor([[[0, 1, 2]],

        [[3, 4, 5]]]) torch.Size([2, 1, 3])
d2 tensor([[0, 1, 2],
        [3, 4, 5]]) torch.Size([2, 3])

Torch norm

我觉得他讲得好(注:原文此处应附有外部讲解文章的链接,转载时已丢失)

自动求导机制

我觉得他讲得好(注:原文此处应附有外部讲解文章的链接,转载时已丢失)

发布了45 篇原创文章 · 获赞 0 · 访问量 997

猜你喜欢

转载自blog.csdn.net/jokerxsy/article/details/104605078
今日推荐