common/bouncebuf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <[email protected]>
 */

#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>
#include <asm/cache.h>
#include <linux/dma-mapping.h>

static int addr_aligned(struct bounce_buffer *state)
{
        const ulong align_mask = ARCH_DMA_MINALIGN - 1;

        /* Check if start is aligned */
        if ((ulong)state->user_buffer & align_mask) {
                debug("Unaligned buffer address %p\n", state->user_buffer);
                return 0;
        }

        /* Check if length is aligned */
        if (state->len != state->len_aligned) {
                debug("Unaligned buffer length %zu\n", state->len);
                return 0;
        }

        /* Aligned */
        return 1;
}

int bounce_buffer_start_extalign(struct bounce_buffer *state, void *data,
                                 size_t len, unsigned int flags,
                                 size_t alignment,
                                 int (*addr_is_aligned)(struct bounce_buffer *state))
{
        state->user_buffer = data;
        state->bounce_buffer = data;
        state->len = len;
        state->len_aligned = roundup(len, alignment);
        state->flags = flags;

        if (!addr_is_aligned(state)) {
                state->bounce_buffer = memalign(alignment,
                                                state->len_aligned);
                if (!state->bounce_buffer)
                        return -ENOMEM;

                if (state->flags & GEN_BB_READ)
                        memcpy(state->bounce_buffer, state->user_buffer,
                               state->len);
        }

        /*
         * Flush data to RAM so DMA reads can pick it up,
         * and any CPU writebacks don't race with DMA writes
         */
        dma_map_single(state->bounce_buffer,
                       state->len_aligned,
                       DMA_BIDIRECTIONAL);

        return 0;
}

int bounce_buffer_start(struct bounce_buffer *state, void *data,
                        size_t len, unsigned int flags)
{
        return bounce_buffer_start_extalign(state, data, len, flags,
                                            ARCH_DMA_MINALIGN,
                                            addr_aligned);
}

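/*
 * Illustrative sketch, not part of the original file: a caller that needs a
 * stricter alignment than ARCH_DMA_MINALIGN can pass its own alignment and
 * checker to bounce_buffer_start_extalign(). The 64-byte requirement and the
 * example_* names below are made up for illustration.
 */
int example_addr_aligned_64(struct bounce_buffer *state)
{
        /* Both the start address and the length must be 64-byte aligned */
        return !((ulong)state->user_buffer & 63) &&
               state->len == state->len_aligned;
}

int example_bounce_buffer_start_64(struct bounce_buffer *state, void *data,
                                   size_t len, unsigned int flags)
{
        return bounce_buffer_start_extalign(state, data, len, flags, 64,
                                            example_addr_aligned_64);
}
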
int bounce_buffer_stop(struct bounce_buffer *state)
{
        if (state->flags & GEN_BB_WRITE) {
                /* Invalidate cache so that CPU can see any newly DMA'd data */
                dma_unmap_single((dma_addr_t)(uintptr_t)state->bounce_buffer,
                                 state->len_aligned,
                                 DMA_BIDIRECTIONAL);
        }

        if (state->bounce_buffer == state->user_buffer)
                return 0;

        if (state->flags & GEN_BB_WRITE)
                memcpy(state->user_buffer, state->bounce_buffer, state->len);

        free(state->bounce_buffer);

        return 0;
}
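
/*
 * Illustrative usage sketch, not part of the original file: the typical
 * driver pattern is to wrap a DMA transfer between bounce_buffer_start()
 * and bounce_buffer_stop(), handing the (possibly bounced, cache-aligned)
 * bounce_buffer to the hardware. example_dma_from_device() is a
 * hypothetical device-specific helper, assumed here for illustration only.
 */
int example_dma_from_device(void *buf, size_t len);

int example_bounce_read(void *dst, size_t len)
{
        struct bounce_buffer bb;
        int ret;

        /* GEN_BB_WRITE: the DMA engine writes into the buffer */
        ret = bounce_buffer_start(&bb, dst, len, GEN_BB_WRITE);
        if (ret)
                return ret;

        ret = example_dma_from_device(bb.bounce_buffer, bb.len_aligned);

        /* Copies data back to dst and frees the bounce buffer if one was used */
        bounce_buffer_stop(&bb);

        return ret;
}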