author     David Howells <dhowells@redhat.com>  2022-10-21 14:39:34 +0200
committer  David Howells <dhowells@redhat.com>  2022-12-01 14:36:38 +0100
commit     47c810a79844462d3468d831edc00971757693e0 (patch)
tree       631be82379497ed224fa4038faa9920e9a0f34c3 /net/rxrpc/peer_event.c
parent     rxrpc: trace: Don't use __builtin_return_address for rxrpc_local tracing (diff)
rxrpc: trace: Don't use __builtin_return_address for rxrpc_peer tracing
In rxrpc tracing, use enums to generate lists of points of interest rather
than __builtin_return_address() for the rxrpc_peer tracepoint.
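
As a rough illustration of the pattern (a minimal, self-contained userspace
sketch, not the kernel's actual trace macros; the EM()/E_() helper names and
the stand-in trace_rxrpc_peer() printer are simplified assumptions, while the
enum values come from the diff below), a single list macro is expanded once
into the enum of call sites and once into the enum-to-string table, so the
tracepoint records a small enum value instead of a return address:

	/*
	 * Minimal sketch of the enum-based trace-point pattern.  One list
	 * macro generates both the enum of "points of interest" and the
	 * matching name table used when the trace event is printed.
	 */
	#include <stdio.h>

	#define rxrpc_peer_traces \
		EM(rxrpc_peer_get_input_error,	"GET input-error") \
		EM(rxrpc_peer_get_keepalive,	"GET keepalive") \
		EM(rxrpc_peer_put_input_error,	"PUT input-error") \
		E_(rxrpc_peer_put_keepalive,	"PUT keepalive")

	/* First expansion: the enum of call sites. */
	#define EM(a, b) a,
	#define E_(a, b) a
	enum rxrpc_peer_trace { rxrpc_peer_traces };
	#undef EM
	#undef E_

	/* Second expansion: the enum -> string table for trace output. */
	#define EM(a, b) [a] = b,
	#define E_(a, b) [a] = b
	static const char *const rxrpc_peer_trace_names[] = { rxrpc_peer_traces };
	#undef EM
	#undef E_

	/* Hypothetical stand-in for emitting the trace event. */
	static void trace_rxrpc_peer(enum rxrpc_peer_trace why)
	{
		printf("rxrpc_peer: %s\n", rxrpc_peer_trace_names[why]);
	}

	int main(void)
	{
		trace_rxrpc_peer(rxrpc_peer_get_keepalive);
		trace_rxrpc_peer(rxrpc_peer_put_keepalive);
		return 0;
	}

The gain over __builtin_return_address(0) is that the recorded value stays
meaningful regardless of compiler inlining, and the set of call sites is an
explicit, greppable list.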
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Diffstat (limited to 'net/rxrpc/peer_event.c')
-rw-r--r--  net/rxrpc/peer_event.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 3f8d104ecaa7..5e97d321ac38 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -168,7 +168,7 @@ void rxrpc_error_report(struct sock *sk)
 	}
 
 	peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
-	if (peer && !rxrpc_get_peer_maybe(peer))
+	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
 		peer = NULL;
 	if (!peer) {
 		rcu_read_unlock();
@@ -190,7 +190,7 @@ void rxrpc_error_report(struct sock *sk)
 out:
 	rcu_read_unlock();
 	rxrpc_free_skb(skb, rxrpc_skb_freed);
-	rxrpc_put_peer(peer);
+	rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
 
 	_leave("");
 }
@@ -263,7 +263,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 				  struct rxrpc_peer, keepalive_link);
 
 		list_del_init(&peer->keepalive_link);
-		if (!rxrpc_get_peer_maybe(peer))
+		if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
 			continue;
 
 		if (__rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive)) {
@@ -291,7 +291,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
 			       &rxnet->peer_keepalive[slot & mask]);
 			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
 		}
-		rxrpc_put_peer_locked(peer);
+		rxrpc_put_peer_locked(peer, rxrpc_peer_put_keepalive);
 	}
 
 	spin_unlock_bh(&rxnet->peer_hash_lock);