@@ -145,6 +145,7 @@ int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
+bool dma_recommend_may_block(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
@@ -252,6 +253,10 @@ static inline bool dma_addressing_limited(struct device *dev)
{
return false;
}
+static inline bool dma_recommend_may_block(struct device *dev)
+{
+	return false;
+}
static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
@@ -649,6 +649,12 @@ bool dma_direct_all_ram_mapped(struct device *dev)
check_ram_in_range_map);
}

+bool dma_direct_recommend_may_block(struct device *dev)
+{
+	return IS_ENABLED(CONFIG_SWIOTLB_THROTTLE) &&
+		is_swiotlb_force_bounce(dev);
+}
+
size_t dma_direct_max_mapping_size(struct device *dev)
{
/* If SWIOTLB is active, use its maximum mapping size */
@@ -21,6 +21,7 @@ bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs);
bool dma_direct_all_ram_mapped(struct device *dev);
+bool dma_direct_recommend_may_block(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
@@ -858,6 +858,16 @@ bool dma_addressing_limited(struct device *dev)
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);

+bool dma_recommend_may_block(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_map_direct(dev, ops))
+		return dma_direct_recommend_may_block(dev);
+	return false;
+}
+EXPORT_SYMBOL_GPL(dma_recommend_may_block);
+
size_t dma_max_mapping_size(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
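Taken together, these hunks add a single query, dma_recommend_may_block(), which reports true only when CONFIG_SWIOTLB_THROTTLE is enabled and the device is forced through swiotlb bounce buffering; non-direct dma_map_ops and the !CONFIG_HAS_DMA stub always report false. The sketch below is an illustration only, not part of the patch: foo_queue and foo_queue_init() are made-up names, and the point is simply that a driver would cache the hint once at setup time rather than querying it in the I/O path.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical per-queue state for a driver that wants to use the hint. */
struct foo_queue {
	struct device *dev;
	bool may_block;		/* blocking in the mapping path is worthwhile */
};

static void foo_queue_init(struct foo_queue *q, struct device *dev)
{
	q->dev = dev;
	/*
	 * Cache the recommendation: true means this device bounces through
	 * a throttled swiotlb, so allowing the mapping path to block (for
	 * requests issued from sleepable context) can relieve pressure on
	 * the bounce-buffer pool.
	 */
	q->may_block = dma_recommend_may_block(q->dev);
}

A driver would then opt into the blocking behavior provided elsewhere in this series only on queues where q->may_block is set and the submission context may sleep, and keep the existing non-blocking path otherwise.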